From 1cafa85f4f58ce9f56b56ae765cfbbad2ef66ff4 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Mon, 29 Mar 2021 09:12:55 -0300 Subject: [PATCH 01/26] RF: updated simple eyetracking demo Demo now gets display device info from psychopy window passed to launchHubServer --- .../demos/coder/iohub/eyetracking/simple.py | 196 +++++++++--------- psychopy/iohub/changes.txt | 10 +- 2 files changed, 108 insertions(+), 98 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/simple.py b/psychopy/demos/coder/iohub/eyetracking/simple.py index 835340b2cbb..b0476a8d7d3 100644 --- a/psychopy/demos/coder/iohub/eyetracking/simple.py +++ b/psychopy/demos/coder/iohub/eyetracking/simple.py @@ -9,11 +9,12 @@ from psychopy import core, visual from psychopy.iohub import launchHubServer, EventConstants + # Eye tracker to use ('mouse', 'eyelink', 'gazepoint', or 'tobii') TRACKER = 'mouse' eyetracker_config = dict(name='tracker') -devices_config = {'Display': {'reporting_unit_type': 'pix', 'device_number': 0}} +devices_config = {} if TRACKER == 'eyelink': eyetracker_config['model_name'] = 'EYELINK 1000 DESKTOP' eyetracker_config['simulation_mode'] = False @@ -28,104 +29,109 @@ devices_config['eyetracker.hw.mouse.EyeTracker'] = eyetracker_config else: print("{} is not a valid TRACKER name; please use 'mouse', 'eyelink', 'gazepoint', or 'tobii'.".format(TRACKER)) + core.quit() # Number if 'trials' to run in demo TRIAL_COUNT = 2 # Maximum trial time / time timeout T_MAX = 60.0 - -if devices_config: - # Since no experiment or session code is given, no iohub hdf5 file - # will be saved, but device events are still available at runtime. - io = launchHubServer(**devices_config) - - # Get some iohub devices for future access. - keyboard = io.getDevice('keyboard') - display = io.getDevice('display') - tracker = io.getDevice('tracker') - - # print("display: ", display.getCoordinateType()) - - # run eyetracker calibration - tracker.runSetupProcedure() - - win = visual.Window(display.getPixelResolution(), - units=display.getCoordinateType(), - fullscr=True, - allowGUI=False - ) - - win.setMouseVisible(True) - - gaze_ok_region = visual.Circle(win, lineColor='black', radius=300, units='pix') - - gaze_dot = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0), - size=(40, 40), color='green', units='pix') - - text_stim_str = 'Eye Position: %.2f, %.2f. In Region: %s\n' - text_stim_str += 'Press space key to start next trial.' - missing_gpos_str = 'Eye Position: MISSING. In Region: No\n' - missing_gpos_str += 'Press space key to start next trial.' - text_stim = visual.TextStim(win, text=text_stim_str, - pos=[0, 0], height=24, - color='black', units='pix', - wrapWidth=win.size[0] * .9) - - # Run Trials..... - t = 0 - while t < TRIAL_COUNT: - io.clearEvents() - tracker.setRecordingState(True) - run_trial = True - tstart_time = core.getTime() - while run_trial is True: - # Get the latest gaze position in dispolay coord space.. - gpos = tracker.getLastGazePosition() - for evt in tracker.getEvents(): - if evt.type != EventConstants.MONOCULAR_EYE_SAMPLE: - print(evt) - # Update stim based on gaze position - valid_gaze_pos = isinstance(gpos, (tuple, list)) - gaze_in_region = valid_gaze_pos and gaze_ok_region.contains(gpos) - if valid_gaze_pos: - # If we have a gaze position from the tracker, update gc stim - # and text stim. 
-                if gaze_in_region:
-                    gaze_in_region = 'Yes'
-                else:
-                    gaze_in_region = 'No'
-                text_stim.text = text_stim_str % (gpos[0], gpos[1], gaze_in_region)
-
-                gaze_dot.setPos(gpos)
+win = visual.Window((1920, 1080),
+                    units='pix',
+                    fullscr=True,
+                    allowGUI=False,
+                    monitor='55w_60dist'
+                    )
+
+win.setMouseVisible(False)
+
+# Since no experiment or session code is given, no iohub hdf5 file
+# will be saved, but device events are still available at runtime.
+io = launchHubServer(window=win, **devices_config)
+
+# Get some iohub devices for future access.
+keyboard = io.getDevice('keyboard')
+display = io.getDevice('display')
+tracker = io.getDevice('tracker')
+
+win.winHandle.minimize()  # minimize the PsychoPy window
+
+# run eyetracker calibration
+tracker.runSetupProcedure()
+
+win.winHandle.maximize()  # maximize the PsychoPy window
+win.winHandle.activate()
+
+gaze_ok_region = visual.Circle(win, lineColor='black', radius=300, units='pix')
+
+gaze_dot = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0),
+                              size=(40, 40), color='green', units='pix')
+
+text_stim_str = 'Eye Position: %.2f, %.2f. In Region: %s\n'
+text_stim_str += 'Press space key to start next trial.'
+missing_gpos_str = 'Eye Position: MISSING. In Region: No\n'
+missing_gpos_str += 'Press space key to start next trial.'
+text_stim = visual.TextStim(win, text=text_stim_str,
+                            pos=[0, 0], height=24,
+                            color='black', units='pix',
+                            wrapWidth=win.size[0] * .9)
+
+# Run Trials.....
+t = 0
+while t < TRIAL_COUNT:
+    io.clearEvents()
+    tracker.setRecordingState(True)
+    run_trial = True
+    tstart_time = core.getTime()
+    while run_trial is True:
+        # Get the latest gaze position in display coord space..
+        gpos = tracker.getLastGazePosition()
+        for evt in tracker.getEvents():
+            if evt.type != EventConstants.MONOCULAR_EYE_SAMPLE:
+                print(evt)
+        # Update stim based on gaze position
+        valid_gaze_pos = isinstance(gpos, (tuple, list))
+        gaze_in_region = valid_gaze_pos and gaze_ok_region.contains(gpos)
+        if valid_gaze_pos:
+            # If we have a gaze position from the tracker, update gc stim
+            # and text stim.
+            if gaze_in_region:
+                gaze_in_region = 'Yes'
+            else:
+                gaze_in_region = 'No'
+            text_stim.text = text_stim_str % (gpos[0], gpos[1], gaze_in_region)
+
+            gaze_dot.setPos(gpos)
+        else:
+            # Otherwise just update text stim
+            text_stim.text = missing_gpos_str
+
+        # Redraw stim
+        gaze_ok_region.draw()
+        text_stim.draw()
+        if valid_gaze_pos:
+            gaze_dot.draw()
+
+        # Display updated stim on screen.
+        flip_time = win.flip()
+
+        # Check any new keyboard char events for a space key.
+        # If one is found, set the trial end variable.
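+        # (If no space press occurs, the trial also times out after T_MAX seconds.)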
+ # + if keyboard.getPresses(keys=' '): + run_trial = False + elif core.getTime()-tstart_time > T_MAX: + run_trial = False + win.flip() + # Current Trial is Done + # Stop eye data recording + tracker.setRecordingState(False) + t += 1 + +# All Trials are done +# End experiment +tracker.setConnectionState(False) + +io.quit() core.quit() diff --git a/psychopy/iohub/changes.txt b/psychopy/iohub/changes.txt index e37a04620bd..18077388232 100644 --- a/psychopy/iohub/changes.txt +++ b/psychopy/iohub/changes.txt @@ -7,14 +7,18 @@ Changes made to iohub for 2021.2 Release - Do we need to list every specific setting change? - iohub can no longer be copied out of the psychopy package and used as a stand alone package in your site-packages folder. Change `import iohub` to `import psychopy.iohub` for example. -- Removed most device settings related to device hardware specifics (model_id, manfacture_date, etc) +- Removed most device settings related to device hardware specifics (model_id, manufacturer_date, etc) since they were never really used. - removed use of .iohpid - ** Changed datastore schema: - increased experiment and session data string max length - Added wintab device for Wacom based digital pens. (Windows only) - Added iohub MouseGaze eye tracker simulator. -- Added 'color_space' setting to Display (bridging to remove Display class from iohub) +- Added 'color_space' setting to Display settings. - Eyelink and tobii calibration gfx can now use same color space as window instead of always rgb255. - TODO: Retest all eyetracking examples with all eye trackers on all OS's -- Removed iohub/devices/display/unit_conversions.py. Moving to psychopy monitorutil functions. \ No newline at end of file +- Removed iohub/devices/display/unit_conversions.py. Moving to psychopy monitorutil functions. +- launchHubServer now accepts a psychopy window using the window kwarg. + iohub display info is updated using window information like .monitor, .colorSpace, .units, .screen +- If psychopy window (with monitor config) is passed to launchHubServer, + user can forget about need for iohub Display config. \ No newline at end of file From 53299078d802cd105de017b247cd084bd37c234e Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Mon, 29 Mar 2021 10:29:14 -0300 Subject: [PATCH 02/26] DOC: Added MouseGaze eye tracker docs. Added MouseGaze simulated eye tracker to supported eye tracker list of docs. --- .../default_mousegaze_eyetracker.yaml | 41 +++++ docs/source/api/iohub/device/eyetracker.rst | 3 +- .../MouseGaze_Implementation_Notes.rst | 150 ++++++++++++++++++ 3 files changed, 193 insertions(+), 1 deletion(-) create mode 100644 docs/source/api/iohub/device/default_yaml_configs/default_mousegaze_eyetracker.yaml create mode 100644 docs/source/api/iohub/device/eyetracker_interface/MouseGaze_Implementation_Notes.rst diff --git a/docs/source/api/iohub/device/default_yaml_configs/default_mousegaze_eyetracker.yaml b/docs/source/api/iohub/device/default_yaml_configs/default_mousegaze_eyetracker.yaml new file mode 100644 index 00000000000..1760d2d68b4 --- /dev/null +++ b/docs/source/api/iohub/device/default_yaml_configs/default_mousegaze_eyetracker.yaml @@ -0,0 +1,41 @@ +eyetracker.hw.mouse.EyeTracker: + # True = Automatically start reporting events for this device when the experiment starts. + # False = Do not start reporting events for this device until enableEventReporting(True) + # is called for the device. 
+ auto_report_events: False + + # Should eye tracker events be saved to the ioHub DataStore file when the device + # is recording data ? + save_events: True + + # Should eye tracker events be sent to the Experiment process when the device + # is recording data ? + stream_events: True + + # How many eye events (including samples) should be saved in the ioHub event buffer before + # old eye events start being replaced by new events. When the event buffer reaches + # the maximum event length of the buffer defined here, older events will start to be dropped. + event_buffer_length: 1024 + runtime_settings: + # How many samples / second should Mousegaze Generate. + # 50 or 100 hz are supported. + sampling_rate: 50 + + # MouseGaze always generates Monocular Right eye samples. + track_eyes: RIGHT_EYE + + controls: + # Mouse Button used to make a MouseGaze position change. + # LEFT_BUTTON, MIDDLE_BUTTON, RIGHT_BUTTON. + move: RIGHT_BUTTON + + # Mouse Button(s) used to make MouseGaze generate a blink event. + # LEFT_BUTTON, MIDDLE_BUTTON, RIGHT_BUTTON. + blink: [LEFT_BUTTON, RIGHT_BUTTON] + + # Threshold for saccade generation. Specified in visual degrees. + saccade_threshold: 0.5 + + # MouseGaze creates (minimally populated) fixation, saccade, and blink events. + monitor_event_types: [MonocularEyeSampleEvent, FixationStartEvent, FixationEndEvent, SaccadeStartEvent, SaccadeEndEvent, BlinkStartEvent, BlinkEndEvent] + diff --git a/docs/source/api/iohub/device/eyetracker.rst b/docs/source/api/iohub/device/eyetracker.rst index fd27b1a88f5..67c02ca0787 100644 --- a/docs/source/api/iohub/device/eyetracker.rst +++ b/docs/source/api/iohub/device/eyetracker.rst @@ -17,4 +17,5 @@ The following eye trackers are currently supported by iohub. GazePoint SR Research - Tobii \ No newline at end of file + Tobii + MouseGaze (Simulated Eye Tracker) \ No newline at end of file diff --git a/docs/source/api/iohub/device/eyetracker_interface/MouseGaze_Implementation_Notes.rst b/docs/source/api/iohub/device/eyetracker_interface/MouseGaze_Implementation_Notes.rst new file mode 100644 index 00000000000..6d16c73301d --- /dev/null +++ b/docs/source/api/iohub/device/eyetracker_interface/MouseGaze_Implementation_Notes.rst @@ -0,0 +1,150 @@ +########## +MouseGaze +########## + +MouseGaze simulates an eye tracker using the computer Mouse. + +**Platforms:** + +* Windows 7 / 10 +* Linux +* macOS + +**Required Python Version:** + +* Python 3.6 + + +**Supported Models:** + +* Any Mouse. ;) + +Additional Software Requirements +################################# + +None + +EyeTracker Class +################ + +.. autoclass:: psychopy.iohub.devices.eyetracker.hw.mouse.EyeTracker() + :members: runSetupProcedure, setRecordingState, enableEventReporting, isRecordingEnabled, getEvents, clearEvents, getLastSample, getLastGazePosition, getPosition, trackerTime, trackerSec, getConfiguration + +Supported Event Types +##################### + +MouseGaze generates monocular eye samples. A MonocularEyeSampleEvent +is created every 10 or 20 msec depending on the sampling_rate set +for the device. + +The following fields of the MonocularEyeSample event are supported: + +.. autoclass:: psychopy.iohub.devices.eyetracker.BinocularEyeSampleEvent(object) + + .. attribute:: time + + time of event, in sec.msec format, using psychopy timebase. + + .. attribute:: gaze_x + + The horizontal position of MouseGaze on the computer screen, + in Display Coordinate Type Units. Calibration must be done prior + to reading (meaningful) gaze data. 
+ Uses Gazepoint LPOGX field. + + .. attribute:: gaze_y + + The vertical position of MouseGaze on the computer screen, + in Display Coordinate Type Units. Calibration must be done prior + to reading (meaningful) gaze data. + Uses Gazepoint LPOGY field. + + .. attribute:: left_pupil_measure_1 + + MouseGaze pupil diameter, static at 5 mm. + + .. attribute:: status + + Indicates if eye sample contains 'valid' position data. + 0 = MouseGaze position is valid. + 2 = MouseGaze position is missing (in simulated blink). + + +MouseGaze also creates basic fixation, saccade, and blink events +based on mouse event data. + +.. autoclass:: psychopy.iohub.devices.eyetracker.FixationStartEvent(object) + + .. attribute:: time + + time of event, in sec.msec format, using psychopy timebase. + + .. attribute:: eye + + EyeTrackerConstants.RIGHT_EYE. + + .. attribute:: gaze_x + + The horizontal 'eye' position on the computer screen + at the start of the fixation. Units are same as Window. + + + .. attribute:: gaze_y + + The vertical eye position on the computer screen + at the start of the fixation. Units are same as Window. + +.. autoclass:: psychopy.iohub.devices.eyetracker.FixationEndEvent(object) + + .. attribute:: time + + time of event, in sec.msec format, using psychopy timebase. + + .. attribute:: eye + + EyeTrackerConstants.RIGHT_EYE. + + .. attribute:: start_gaze_x + + The horizontal 'eye' position on the computer screen + at the start of the fixation. Units are same as Window. + + + .. attribute:: start_gaze_y + + The vertical 'eye' position on the computer screen + at the start of the fixation. Units are same as Window. + + .. attribute:: end_gaze_x + + The horizontal 'eye' position on the computer screen + at the end of the fixation. Units are same as Window. + + + .. attribute:: end_gaze_y + + The vertical 'eye' position on the computer screen + at the end of the fixation. Units are same as Window. + + .. attribute:: average_gaze_x + + Average calibrated horizontal eye position during the fixation, + specified in Display Units. + + .. attribute:: average_gaze_y + + Average calibrated vertical eye position during the fixation, + specified in Display Units. + + .. attribute:: duration + + Duration of the fixation in sec.msec format. + +Default Device Settings +####################### + +.. literalinclude:: ../default_yaml_configs/default_mousegaze_eyetracker.yaml + :language: yaml + + +**Last Updated:** March, 2021 + From 156e589d02d21ad234b91a4ebf456933279b0422 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Mon, 29 Mar 2021 11:00:02 -0300 Subject: [PATCH 03/26] FF: iohub Tobii config settings Fixed some of the tobii settings I had changed in DEV. Original options were better. Removed 'minimize_psychopy_win' setting, since it was only used by ExpRuntime class. 
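
For reference, connecting from a Coder script after these changes looks
roughly like this (a sketch only; 'win' is assumed to be an existing
psychopy window, and model_name / serial_number may now be left empty to
use the first Tobii found):

    from psychopy.iohub import launchHubServer

    eyetracker_config = dict(name='tracker',
                             model_name='',
                             runtime_settings=dict(sampling_rate=60))
    io = launchHubServer(window=win,
                         **{'eyetracker.hw.tobii.EyeTracker': eyetracker_config})
    tracker = io.getDevice('tracker')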
---
 .../default_tobii_eyetracker.yaml                  |  9 ++-------
 .../iohub/eyetracking/gcCursor/iohub_config.yaml   |  2 +-
 .../eyetracker_configs/tobii_config.yaml           |  7 +++----
 .../eyetracker/hw/tobii/default_eyetracker.yaml    | 10 +++-------
 .../hw/tobii/supported_config_settings.yaml        | 15 ++++++---------
 5 files changed, 15 insertions(+), 28 deletions(-)

diff --git a/docs/source/api/iohub/device/default_yaml_configs/default_tobii_eyetracker.yaml b/docs/source/api/iohub/device/default_yaml_configs/default_tobii_eyetracker.yaml
index b7b1f7e6d7f..c1b60d96c59 100644
--- a/docs/source/api/iohub/device/default_yaml_configs/default_tobii_eyetracker.yaml
+++ b/docs/source/api/iohub/device/default_yaml_configs/default_tobii_eyetracker.yaml
@@ -36,12 +36,7 @@ eyetracker.hw.tobii.EyeTracker:
     serial_number:
 
     calibration:
-        # Should the PsychoPy Window created by the PsychoPy Process be minimized
-        # before displaying the Calibration Window created by the ioHub Process.
-        #
-        minimize_psychopy_win: False
-
-        # The Tobii ioHub Common Eye Tracker Interface currently support
+        # The Tobii ioHub Common Eye Tracker Interface currently support
         # a 3, 5 and 9 point calibration mode.
         # THREE_POINTS,FIVE_POINTS,NINE_POINTS
         #
@@ -150,7 +145,7 @@ eyetracker.hw.tobii.EyeTracker:
 
     runtime_settings:
         # The supported sampling rates for Tobii are model dependent.
-        # Using a defualt of 60 Hz, with the assumption it is the most common.
+        # Using a default of 60 Hz.
        sampling_rate: 60
 
         # Tobii implementation supports BINOCULAR tracking mode only.
diff --git a/psychopy/demos/coder/iohub/eyetracking/gcCursor/iohub_config.yaml b/psychopy/demos/coder/iohub/eyetracking/gcCursor/iohub_config.yaml
index 1ffb5ed03d4..d98f17b7c3c 100644
--- a/psychopy/demos/coder/iohub/eyetracking/gcCursor/iohub_config.yaml
+++ b/psychopy/demos/coder/iohub/eyetracking/gcCursor/iohub_config.yaml
@@ -114,7 +114,7 @@ monitor_devices:
 #    #    name will be considered as possible candidates for connection.
 #    #    If you only have one Tobii system connected to the computer,
 #    #    this field can just be left empty.
-#    model_name: Any Pro Model
+#    model_name:
 
 #    #    The serial number of the Tobii device that you wish to connect to.
 #    #    If set, only the Tobii system matching that serial number will be
diff --git a/psychopy/demos/coder/iohub/eyetracking/selectTracker/eyetracker_configs/tobii_config.yaml b/psychopy/demos/coder/iohub/eyetracking/selectTracker/eyetracker_configs/tobii_config.yaml
index 26a11cc8bb9..be930ca955b 100644
--- a/psychopy/demos/coder/iohub/eyetracking/selectTracker/eyetracker_configs/tobii_config.yaml
+++ b/psychopy/demos/coder/iohub/eyetracking/selectTracker/eyetracker_configs/tobii_config.yaml
@@ -11,7 +11,7 @@ monitor_devices:
         # The model name of the Tobii device that you wish to connect to can be specified here,
         # and only Tobii systems matching that model name will be considered as possible candidates for connection.
         # If you only have one Tobii system connected to the computer, this field can just be left empty.
-        model_name: Any Pro Model
+        model_name:
 
         # The serial number of the Tobii device that you wish to connect to can be specified here,
         # and only the Tobii system matching that serial number will be connected to, if found.
@@ -70,6 +70,5 @@ monitor_devices:
             expansion_speed: 30.0   # expands at 30.0 pix / sec
             contract_only: True
     runtime_settings:
-        # The supported sampling rates for Tobii are model dependent.
-        # Using a default of 60 Hz, with the assumption it is the most common.
-        sampling_rate: 120
+        # The supported sampling rates for Tobii are model dependent.
+        sampling_rate: 60
diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml b/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml
index 4987cf404b3..24d247bb043 100644
--- a/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml
+++ b/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml
@@ -27,7 +27,7 @@ eyetracker.hw.tobii.EyeTracker:
     # The model name of the Tobii device that you wish to connect to can be specified here,
     # and only Tobii systems matching that model name will be considered as possible candidates for connection.
     # If you only have one Tobii system connected to the computer, this field can just be left empty.
-    model_name: Any Pro Model
+    model_name:
 
     # The serial number of the Tobii device that you wish to connect to can be specified here,
     # and only the Tobii system matching that serial number will be connected to, if found.
@@ -36,11 +36,6 @@ eyetracker.hw.tobii.EyeTracker:
     serial_number:
 
     calibration:
-        # Should the PsychoPy Window created by the PsychoPy Process be minimized
-        # before displaying the Calibration Window created by the ioHub Process.
-        #
-        minimize_psychopy_win: False
-
         # The Tobii ioHub Common Eye Tracker Interface currently support
         # a 3, 5 and 9 point calibration mode.
         # THREE_POINTS,FIVE_POINTS,NINE_POINTS
@@ -150,7 +145,8 @@ eyetracker.hw.tobii.EyeTracker:
 
     runtime_settings:
         # The supported sampling rates for Tobii are model dependent.
-        # Using a default of 60 Hz, with the assumption it is the most common.
+        # If the sampling rate specified here is not supported by the model being used,
+        # the Tobii device will continue to use its current sampling rate.
         sampling_rate: 60
 
         # Tobii supports BINOCULAR tracking mode only.
diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml b/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml
index 1620aca88f2..c1611893d12 100644
--- a/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml
+++ b/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml
@@ -6,10 +6,9 @@ eyetracker.hw.tobii.EyeTracker:
         first_char_alpha: True
     enable: IOHUB_BOOL
     model_name:
-        IOHUB_LIST:
-            valid_values: [Any Pro Model, Nano, Fusion, Spectrum, T120, X120, TX300, X2, X3]
-            min_length: 1
-            max_length: 1
+        IOHUB_STRING:
+            min_length: 0
+            max_length: 64
     serial_number:
         IOHUB_STRING:
             min_length: 0
             max_length: 64
@@ -24,13 +23,11 @@ eyetracker.hw.tobii.EyeTracker:
         monitor_event_types: [BinocularEyeSampleEvent,]
     runtime_settings:
         sampling_rate:
-            IOHUB_LIST:
-                valid_values: [30, 60, 120, 250, 300, 1200]
-                min_length: 1
-                max_length: 1
+            IOHUB_INT:
+                min: 30
+                max: 1200
         track_eyes: [BINOCULAR,]
     calibration:
-        minimize_psychopy_win: IOHUB_BOOL
         # The Tobii ioHub Common Eye Tracker Interface currently support
         # a 3, 5 and 9 point calibration mode.
         # THREE_POINTS,FIVE_POINTS,NINE_POINTS

From 39635642b01869c0b5445c576bd73e1024d68a26 Mon Sep 17 00:00:00 2001
From: Sol Simpson
Date: Wed, 31 Mar 2021 10:53:59 -0300
Subject: [PATCH 04/26] WIP: iohub eye tracker validation procedure

WIP currently implemented as a demo. run.py starts the validation.
Still working on what the simplified user-facing API should be.
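
The demo currently drives the procedure roughly like this (a sketch of the
still-changing API; win, target, positions and multi_trigger are built as
in run.py below):

    from validationroutine import ValidationProcedure

    validation_proc = ValidationProcedure(win, target, positions,
                                          triggers=multi_trigger,
                                          show_intro_screen=True,
                                          show_results_screen=True)
    results = validation_proc.run()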
--- .../iohub/eyetracking/validation/posgrid.py | 270 +++++ .../coder/iohub/eyetracking/validation/run.py | 208 ++++ .../iohub/eyetracking/validation/trigger.py | 231 ++++ .../validation/validationroutine.py | 1040 +++++++++++++++++ psychopy/iohub/devices/display/__init__.py | 3 +- 5 files changed, 1750 insertions(+), 2 deletions(-) create mode 100644 psychopy/demos/coder/iohub/eyetracking/validation/posgrid.py create mode 100644 psychopy/demos/coder/iohub/eyetracking/validation/run.py create mode 100644 psychopy/demos/coder/iohub/eyetracking/validation/trigger.py create mode 100644 psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/posgrid.py b/psychopy/demos/coder/iohub/eyetracking/validation/posgrid.py new file mode 100644 index 00000000000..1f5d2b9e07f --- /dev/null +++ b/psychopy/demos/coder/iohub/eyetracking/validation/posgrid.py @@ -0,0 +1,270 @@ +import numpy as np +from psychopy.iohub.client import ioHubConnection + +class PositionGrid(object): + def __init__(self, + bounds=None, + shape=None, # Defines the number of columns and rows of + # positions needed. If shape is an array of + # two elements, it defines the col,row shape + # for position layout. Position count will + # equal rows*cols. If shape is a single + # int, the position grid col,row shape will + # be shape x shape. + posCount=None, # Defines the number of positions to create + # without any col,row position constraint. + leftMargin=None, # Specify the minimum valid horz position. + rightMargin=None, # Limit horz positions to be < max horz + # position minus rightMargin. + topMargin=None, # Limit vert positions to be < max vert + # position minus topMargin. + bottomMargin=None, # Specify the minimum valid vert position. + scale=1.0, # Scale can be one or two numbers, each + # between 0.0 and 1.0. If a tuple is + # provided, it represents the horz, vert + # scale to be applied to window width, + # height. If a single number is + # given, the same scale will be applied to + # both window width and height. The scaled + # window size is centered on the original + # window size to define valid position area. + posList=None, # Provide an existing list of (x,y) + # positions. If posList is provided, the + # shape, posCount, margin and scale arg's + # are ignored. + noiseStd=None, # Add a random shift to each position based + # on a normal distribution with mean = 0.0 + # and sigma equal to noiseStd. Specify + # value based on units being used. + firstposindex=0, # Specify which position in the position + # list should be displayed first. This + # position is not effected by randomization. + repeatFirstPos=True # If the first position in the list should + # be provided as the last position as well, + # set to True. In this case, the number of + # positions returned will be position + # count + 1. False indicated the first + # position should not be repeated. + ): + """ + PositionGrid provides a flexible way to generate a set of x,y position + values within the boundaries of the psychopy window object provided. + + The class provides a set of arguments that represent commonly needed + constraints when creating a target position list, supporting a + variety of position arrangements. + + PositionGrid supports the len() function, and returns the number of + positions generated based on the supplied parameters. If repeatFirstPos + is true, len(posgrid) == number of unique positions + 1 (a repeat of the + first position value). 
+ + PositionGrid is a generator, so the normal way to access the positions from + the class is to use a for loop or with statement: + + posgrid = PositionGrid(....) + for pos in posgrid: + # do something cool with the pos + print(pos) + + :param bounds: + :param shape: + :param posCount: + :param leftMargin: + :param rightMargin: + :param topMargin: + :param bottomMargin: + :param scale: + :param posList: + :param noiseStd: + :param firstposindex: + :param repeatFirstPos: + """ + self.posIndex = 0 + self.positions = None + self.posOffsets = None + + self.bounds = bounds + if self.bounds is None: + self.bounds = ioHubConnection.getActiveConnection().devices.display.getCoordBounds() + print("iohub display units:", ioHubConnection.getActiveConnection().devices.display.getCoordinateType()) + print("Bounds: ", self.bounds) + winSize = self.bounds[2]-self.bounds[0], self.bounds[3]-self.bounds[1] + self.firstposindex = firstposindex + + self.repeatfirstpos = repeatFirstPos + + self.horzStd, self.vertStd = None, None + if noiseStd: + if hasattr(noiseStd, '__len__'): + self.horzStd, self.vertStd = noiseStd + else: + self.horzStd, self.vertStd = noiseStd, noiseStd + + horzScale, vertScale = None, None + if scale: + if hasattr(scale, '__len__'): + horzScale, vertScale = scale + else: + horzScale, vertScale = scale, scale + + rowCount, colCount = None, None + if shape: + if hasattr(shape, '__len__'): + colCount, rowCount = shape + else: + rowCount, colCount = shape, shape + + if posList: + # User has provided the target positions, use posList to set + # self.positions as array of x,y pairs. + if (len(posList) == 2 and len(posList[0]) != 2 and len(posList[0]) == len(posList[1])): + # positions were provided in ((x1,x2,..,xn),(y1,y2,..,yn)) + # format + self.positions = np.column_stack((posList[0], posList[1])) + elif len(posList[0]) == 2: + self.positions = np.asarray(posList) + else: + raise ValueError('PositionGrid posList kwarg must be in ((x1,y1),(x2,y2),..,(xn,yn))' + ' or ((x1,x2,..,xn),(y1,y2,..,yn)) format') + + if self.positions is None and (posCount or (rowCount and colCount)): + # Auto generate position list based on criteria + # provided. 
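+            # (winSize was computed above from the bounds rect: width, height.)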
+            if winSize is not None:
+                pixw, pixh = winSize
+                xmin = 0.0
+                xmax = 1.0
+                ymin = 0.0
+                ymax = 1.0
+
+                if leftMargin:
+                    if leftMargin < pixw:
+                        xmin = leftMargin / pixw
+                    else:
+                        raise ValueError('PositionGrid leftMargin kwarg must be < winSize[0]')
+                if rightMargin:
+                    if rightMargin < pixw:
+                        xmax = 1.0 - rightMargin / pixw
+                    else:
+                        raise ValueError('PositionGrid rightMargin kwarg must be < winSize[0]')
+                if topMargin:
+                    if topMargin < pixh:
+                        ymax = 1.0 - topMargin / pixh
+                    else:
+                        raise ValueError('PositionGrid topMargin kwarg must be < winSize[1]')
+                if bottomMargin:
+                    if bottomMargin < pixh:
+                        ymin = bottomMargin / pixh
+                    else:
+                        raise ValueError('PositionGrid bottomMargin kwarg must be < winSize[1]')
+
+                if horzScale:
+                    if 0.0 < horzScale <= 1.0:
+                        xmin += (1.0 - horzScale) / 2.0
+                        xmax -= (1.0 - horzScale) / 2.0
+                    else:
+                        raise ValueError('PositionGrid horzScale kwarg must satisfy 0.0 < horzScale <= 1.0')
+
+                if vertScale:
+                    if 0.0 < vertScale <= 1.0:
+                        ymin += (1.0 - vertScale) / 2.0
+                        ymax -= (1.0 - vertScale) / 2.0
+                    else:
+                        raise ValueError('PositionGrid vertScale kwarg must satisfy 0.0 < vertScale <= 1.0')
+                if posCount:
+                    colCount = int(np.sqrt(posCount))
+                    rowCount = colCount
+                    xps = np.random.uniform(xmin, xmax, colCount) * pixw - pixw / 2.0
+                    yps = np.random.uniform(ymin, ymax, rowCount) * pixh - pixh / 2.0
+                else:
+                    xps = np.linspace(xmin, xmax, colCount) * pixw - pixw / 2.0
+                    yps = np.linspace(ymin, ymax, rowCount) * pixh - pixh / 2.0
+
+                xps, yps = np.meshgrid(xps, yps)
+                self.positions = np.column_stack((xps.flatten(), yps.flatten()))
+
+            else:
+                raise ValueError('PositionGrid posCount kwarg also requires winSize to be provided.')
+
+        if self.positions is None:
+            raise AttributeError('PositionGrid is unable to generate positions based on the provided kwargs.')
+
+        if self.firstposindex:
+            fpos = self.positions[self.firstposindex]
+            self.positions = np.delete(self.positions, self.firstposindex, 0)
+            self.positions = np.insert(self.positions, 0, fpos, 0)
+
+        self._generatePosOffsets()
+
+    def __len__(self):
+        if self.repeatfirstpos:
+            return len(self.positions) + 1
+        else:
+            return len(self.positions)
+
+    def randomize(self):
+        """
+        Randomize the positions within the position list. If a first position
+        index has been provided, randomization only occurs for positions[1:].
+
+        This can be called multiple times if the same position list is being used
+        repeatedly and a random presentation order is needed.
+
+        Each time randomize() is called, if noiseStd is != 0, a new set of
+        normally distributed offsets are created for the target positions.
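+
+        For example:
+
+            posgrid.randomize()  # new order; fresh noise offsets if noiseStd set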
+        """
+        if not self.firstposindex:
+            np.random.shuffle(self.positions)
+        else:
+            firstpos = self.positions[0]
+            self.positions = np.delete(self.positions, 0, 0)
+            np.random.shuffle(self.positions)
+            self.positions = np.insert(self.positions, 0, firstpos, 0)
+        self._generatePosOffsets()
+
+    def _generatePosOffsets(self):
+        """Create a new set of position displacement 'noise' based on the
+        noiseStd value given when the object was initialized."""
+        horzPosOffsetList = np.zeros((len(self), 1))
+        if self.horzStd:
+            horzPosOffsetList = np.random.normal(0.0, self.horzStd, len(self))
+        vertPosOffsetList = np.zeros((len(self), 1))
+        if self.vertStd:
+            vertPosOffsetList = np.random.normal(0.0, self.vertStd, len(self))
+        # x (horz) offsets go in column 0 and y (vert) offsets in column 1,
+        # matching the x, y column order of self.positions.
+        self.posOffsets = np.column_stack((horzPosOffsetList, vertPosOffsetList))
+
+    def __iter__(self):
+        return self
+
+    # Python 3 compatibility
+    def __next__(self):
+        return self.next()
+
+    def next(self):
+        """Returns the next position in the list. Usually this method is not
+        called directly. Instead, positions are accessed by iterating over the
+        PositionGrid object.
+
+        pos = PositionGrid(....)
+
+        for p in pos:
+            # do something cool with it
+            pass
+
+        """
+        if self.posIndex < len(self.positions):
+            pos = self.positions[self.posIndex] + self.posOffsets[self.posIndex]
+            self.posIndex = self.posIndex + 1
+            return pos
+        elif self.repeatfirstpos and self.posIndex == len(self.positions):
+            pos = self.positions[0] + self.posOffsets[0]
+            self.posIndex = self.posIndex + 1
+            return pos
+        else:
+            self.posIndex = 0
+            raise StopIteration()
+
+    def getPositions(self):
+        return [p for p in self]
diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/run.py b/psychopy/demos/coder/iohub/eyetracking/validation/run.py
new file mode 100644
index 00000000000..87f983ba283
--- /dev/null
+++ b/psychopy/demos/coder/iohub/eyetracking/validation/run.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Example of performing eye tracker validation using the ioHub Common Eye Tracker interface
+and the ValidationProcedure utility class.
+"""
+import time
+from weakref import proxy
+
+from psychopy import visual
+from psychopy.iohub import launchHubServer
+from psychopy.iohub.constants import EventConstants
+
+from trigger import TimeTrigger, DeviceEventTrigger
+from posgrid import PositionGrid
+from validationroutine import ValidationProcedure
+
+class TargetStim(object):
+    def __init__(self,
+                 win,
+                 radius=None,  # The outer radius of the target.
+                 fillcolor=None,  # The color used to fill the target body.
+                 edgecolor=None,  # The color for the edge around the target.
+                 edgewidth=None,  # The thickness of the target outer edge.
+                 dotcolor=None,  # The color of the central target dot.
+                 dotradius=None,  # The radius to use for the target dot.
+                 units=None,  # The psychopy unit type of any size values.
+                 colorspace=None,  # The psychopy color space of any colors.
+                 opacity=1.0,  # The transparency of the target (0.0 - 1.0)
+                 contrast=1.0  # The contrast of the target stim.
+                 ):
+        """
+        TargetStim is a 'doughnut' style target graphic used during the validation procedure.
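+        It is drawn as a filled outer circle (with an optional edge) plus a
+        small central dot when dotcolor differs from fillcolor.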
+
+        :param win:
+        :param radius:
+        :param fillcolor:
+        :param edgecolor:
+        :param edgewidth:
+        :param dotcolor:
+        :param dotradius:
+        :param units:
+        :param colorspace:
+        :param opacity:
+        :param contrast:
+        """
+        self.win = proxy(win)
+        self.stim = []
+        self.radius = radius
+        outer = visual.Circle(self.win, radius=radius, fillColor=fillcolor, lineColor=edgecolor, lineWidth=edgewidth,
+                              edges=32, units=units, colorSpace=colorspace, opacity=opacity,
+                              contrast=contrast, interpolate=True, autoLog=False)
+        self.stim.append(outer)
+
+        if dotcolor and dotcolor != fillcolor:
+            centerdot = visual.Circle(self.win, radius=dotradius, fillColor=dotcolor, lineColor=dotcolor,
+                                      lineWidth=0.0, edges=32, interpolate=True, units=units,
+                                      colorSpace=colorspace, opacity=opacity, contrast=contrast, autoLog=False)
+            self.stim.append(centerdot)
+
+    def setRadius(self, r):
+        """
+        Update the radius of the target stim.
+        """
+        self.stim[0].radius = r
+
+    def setPos(self, pos):
+        """
+        Set the center position of the target stim.
+        """
+        for s in self.stim:
+            s.setPos(pos)
+
+    def draw(self):
+        """
+        Draw the Target stim.
+        """
+        for s in self.stim:
+            s.draw()
+
+    def contains(self, p):
+        """
+        Is point p contained within the Target Stim?
+        :param p: x, y position in stim units
+        :return: bool
+        """
+        return self.stim[0].contains(p)
+
+
+if __name__ == "__main__":
+    # Create a default PsychoPy Window
+    win = visual.Window((1920, 1080), fullscr=True, allowGUI=False, monitor='55w_60dist')
+
+    exp_code = 'targetdisplay'
+    sess_code = 'S_{0}'.format(int(time.mktime(time.localtime())))
+
+    # Create ioHub Server config settings....
+    iohub_config = dict()
+    iohub_config['experiment_code'] = exp_code
+    iohub_config['session_code'] = sess_code
+
+    # Add an eye tracker device
+    et_interface_name = 'eyetracker.hw.mouse.EyeTracker'
+    iohub_config[et_interface_name] = dict(name='tracker')
+
+    # Start ioHub event monitoring process....
+    io = launchHubServer(window=win, **iohub_config)
+
+    # Get the keyboard and eye tracker devices for future access.
+    keyboard = io.devices.keyboard
+    tracker = io.devices.tracker
+    experiment = io.devices.experiment
+
+    # run eyetracker calibration
+    r = tracker.runSetupProcedure()
+
+    # Create a TargetStim instance
+    target = TargetStim(win,
+                        radius=0.025,
+                        fillcolor=[.5, .5, .5],
+                        edgecolor=[-1, -1, -1],
+                        edgewidth=2,
+                        dotcolor=[1, -1, -1],
+                        dotradius=0.005,
+                        units='norm',
+                        colorspace='rgb'
+                        )
+
+    # Create a PositionGrid instance that will hold the locations to display the
+    # target at. The example lists all possible keyword arguments that are
+    # supported. If bounds is None, the ioHub Display device is used
+    # to get the bounding box to be used.
+    #positions = PositionGrid(bounds=None,  # bounding rect of the window, in window unit coords.
+    #                         shape=3,  # Create a grid with 3 cols * 3 rows.
+    #                         posCount=None,
+    #                         leftMargin=None,
+    #                         rightMargin=None,
+    #                         topMargin=None,
+    #                         bottomMargin=None,
+    #                         scale=0.85,  # Equally space the 3x3 grid across 85%
+    #                                      # of the window width and height.
+    #                         posList=None,
+    #                         noiseStd=None,
+    #                         firstposindex=4,  # Use the center position grid
+    #                                           # location as the first point in
+    #                                           # the position order.
+    #                         repeatFirstPos=True  # Redisplay first target position
+    #                                              # as the last target position.
+    #                         )
+    # randomize the grid position presentation order (not including
+    # the first position).
+    #positions.randomize()
+    #print("positions: ", [(p[0], p[1]) for p in positions.getPositions()])
+
+    positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85),
+                 (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)]
+
+    # Specify the Triggers to use to move from target point to point during
+    # the validation sequence....
+
+    # Use DeviceEventTrigger to create a keyboard event trigger
+    # which will fire when the space key is pressed.
+    kb_trigger = DeviceEventTrigger(io.getDevice('keyboard'),
+                                    event_type=EventConstants.KEYBOARD_RELEASE,
+                                    event_attribute_conditions={'key': ' '},
+                                    repeat_count=0)
+
+    # Create a list of Trigger instances. The first one that
+    # fires will cause the start of the next target position
+    # presentation.
+    multi_trigger = (TimeTrigger(start_time=None, delay=2.5), kb_trigger)
+
+    # Define a dict containing the target animation params to use;
+    # None values disable the related animation.
+    targ_anim_param = dict(velocity=1.0,  # 800.0,
+                           expandedscale=3.0,  # 2.0,
+                           expansionduration=0.2,  # 0.1,
+                           contractionduration=0.4)  # 0.1
+    print(win.units)
+    print(target.stim[0].units)
+    # Create a validation procedure
+    vin_txt = 'Validation procedure is now going to be performed.'
+    validation_proc = ValidationProcedure(win, target, positions,
+                                          target_animation_params=targ_anim_param,
+                                          background=None,
+                                          triggers=multi_trigger,  # or a single Trigger, e.g. kb_trigger
+                                          storeeventsfor=None,
+                                          accuracy_period_start=0.550,
+                                          accuracy_period_stop=.150,
+                                          show_intro_screen=True,
+                                          intro_text=vin_txt,
+                                          show_results_screen=True,
+                                          results_in_degrees=False,
+                                          randomize_positions=False)
+
+    # Run the validation procedure. The run() method does not return until
+    # the validation is complete. The calculated validation results, and data
+    # collected for the analysis, are returned.
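+    # (Each target advances on a space key release, or after the 2.5 sec
+    #  TimeTrigger delay, whichever fires first; see multi_trigger above.)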
+ results = validation_proc.run() + + # The last run validation results can also be retrieved using: + # results = validation_proc.getValidationResults() + + io.quit() diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/trigger.py b/psychopy/demos/coder/iohub/eyetracking/validation/trigger.py new file mode 100644 index 00000000000..2346a0133a1 --- /dev/null +++ b/psychopy/demos/coder/iohub/eyetracking/validation/trigger.py @@ -0,0 +1,231 @@ +from psychopy import core +from psychopy.iohub.constants import EventConstants +from psychopy.iohub.client import ioHubConnection + +getTime = core.getTime + + +class Trigger(object): + io = None + + def __init__(self, trigger_function=lambda a, b, c: True, user_kwargs={}, repeat_count=0): + Trigger.io = ioHubConnection.getActiveConnection() + self.trigger_function = trigger_function + self.user_kwargs = user_kwargs + self._last_triggered_event = None + self._last_triggered_time = None + self.repeat_count = repeat_count + self.triggered_count = 0 + + def triggered(self, **kwargs): + if 0 <= self.repeat_count < self.triggered_count: + return False + return True + + def getTriggeringEvent(self): + return self._last_triggered_event + + def getTriggeringTime(self): + return self._last_triggered_time + + def getTriggeredStateCallback(self): + return self.trigger_function, self.user_kwargs + + def resetLastTriggeredInfo(self): + self._last_triggered_event = None + self._last_triggered_time = None + + def resetTrigger(self): + self.resetLastTriggeredInfo() + self.triggered_count = 0 + + @classmethod + def getEventBuffer(cls, copy=False): + return {} + + @classmethod + def clearEventHistory(cls, returncopy=False): + if returncopy: + return {} + + @classmethod + def getTriggersFrom(cls,triggers): + """ + Returns a list of Trigger instances generated based on the contents of the + input triggers. + + :param io: + :param triggers: + :return: + """ + # Handle different valid trigger object types + trig_list = () + io = cls.io + if isinstance(triggers, (list, tuple)): + # Support is provided for a list of Trigger objects or a list of + # strings. + t1 = triggers[0] + if isinstance(t1, str): + # triggers is a list of strings, so try and create a list of + # DeviceEventTrigger's using keyboard device, KEYBOARD_RELEASE + # event type, and the triggers list elements each as the + # event.key. + kbtriggers = [] + for c in triggers: + kbtriggers.append(KeyboardTrigger(c, on_press=False)) + trig_list = kbtriggers + else: + # Assume triggers is a list of Trigger objects + trig_list = triggers + elif isinstance(triggers, (int, float)): + # triggers is a number, so assume a TimeTrigger is wanted where + # the delay == triggers. start time will be the fliptime of the + # last update for drawing to the new target position. + trig_list = (TimeTrigger(start_time=None, delay=triggers),) + elif isinstance(triggers, str): + # triggers is a string, so try and create a + # DeviceEventTrigger using keyboard device, KEYBOARD_RELEASE + # event type, and triggers as the event.key. + trig_list = [KeyboardTrigger(triggers, on_press=False), ] + elif isinstance(triggers, Trigger): + # A single Trigger object was provided + trig_list = (triggers,) + else: + raise ValueError('The triggers kwarg could not be understood as a valid triggers input value.') + return trig_list + +class TimeTrigger(Trigger): + """ + A TimeTrigger associates a delay from the provided start_time + parameter to when the classes triggered() method returns True. 
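+
+    For example, TimeTrigger(start_time=None, delay=2.5) starts timing the
+    first time triggered() is checked and fires 2.5 seconds later.
+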
+ start_time and delay can be sec.msec float, or a callable object + (that takes no parameters). + """ + def __init__(self, start_time, delay, repeat_count=0, trigger_function=lambda a, b, c: True, user_kwargs={}): + Trigger.__init__(self, trigger_function, user_kwargs, repeat_count) + + self._start_time = start_time + + if start_time is None or not callable(start_time): + def startTimeFunc(): + if self._start_time is None: + self._start_time = getTime() + return self._start_time + self.startTime = startTimeFunc + else: + self.startTime = start_time + + self.delay = delay + if not callable(delay): + def delayFunc(): + return delay + self.delay = delayFunc + + def triggered(self, **kwargs): + if Trigger.triggered(self) is False: + return False + + if self.startTime is None: + start_time = kwargs.get('start_time') + else: + start_time = self.startTime() + + if self.delay is None: + delay = kwargs.get('delay') + else: + delay = self.delay() + + ct = getTime() + if ct - start_time >= delay: + self._last_triggered_time = ct + self._last_triggered_event = ct + self.triggered_count += 1 + return True + return False + + def resetTrigger(self): + self.resetLastTriggeredInfo() + self.triggered_count = 0 + self._start_time = None + + +class DeviceEventTrigger(Trigger): + """ + A DeviceEventTrigger associates a set of conditions for a + DeviceEvent that must be met before the classes triggered() method + returns True. + """ + _lastEventsByDevice = dict() + def __init__(self, device, event_type, event_attribute_conditions={}, repeat_count=-1, + trigger_function=lambda a, b, c: True, user_kwargs={}): + Trigger.__init__(self, trigger_function, user_kwargs, repeat_count) + self.device = device + self.event_type = event_type + self.event_attribute_conditions = event_attribute_conditions + + def triggered(self, **kwargs): + if Trigger.triggered(self) is False: + return False + + events = self.device.getEvents() + if events is None: + events = [] + if self.device in self._lastEventsByDevice: + self._lastEventsByDevice[self.device].extend(events) + else: + self._lastEventsByDevice[self.device] = events + unhandledEvents = self._lastEventsByDevice.get(self.device, []) + + for event in unhandledEvents: + foundEvent = True + if event.type != self.event_type: + foundEvent = False + else: + for (attrname, conds) in self.event_attribute_conditions.items(): + if isinstance(conds, (list, tuple)) and getattr(event, attrname) in conds: + # event_value is a list or tuple of possible values + # that are OK + pass + elif getattr(event, attrname) is conds or getattr(event, attrname) == conds: + # event_value is a single value + pass + else: + foundEvent = False + + if foundEvent is True: + self._last_triggered_time = getTime() + self._last_triggered_event = event + self.triggered_count += 1 + return True + + return False + + @classmethod + def getEventBuffer(cls, copy=False): + if copy: + return dict(cls._lastEventsByDevice) + return cls._lastEventsByDevice + + @classmethod + def clearEventHistory(cls, returncopy=False): + eventbuffer = None + if returncopy: + eventbuffer = dict(cls._lastEventsByDevice) + cls._lastEventsByDevice.clear() + return eventbuffer + + def resetLastTriggeredInfo(self): + Trigger.resetLastTriggeredInfo(self) + if self.device in self._lastEventsByDevice: + del self._lastEventsByDevice[self.device] + + +class KeyboardTrigger(DeviceEventTrigger): + def __init__(self, key, on_press=False): + if on_press: + etype = EventConstants.KEYBOARD_PRESS + else: + etype = EventConstants.KEYBOARD_RELEASE + 
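+        # Delegate to DeviceEventTrigger, using the iohub keyboard device and
+        # the requested press / release event type.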
+        DeviceEventTrigger.__init__(self, self.io.devices.keyboard, event_type=etype,
+                                    event_attribute_conditions={'key': key})
+
diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py b/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py
new file mode 100644
index 00000000000..a3faebcd640
--- /dev/null
+++ b/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py
@@ -0,0 +1,1040 @@
+# -*- coding: utf-8 -*-
+"""
+Eye Tracker Validation procedure using the ioHub common eye tracker interface.
+
+To use the validation process from within a Coder script:
+* Create an instance of TargetStim, specifying the fixation target appearance.
+* Create an instance of PositionGrid, which defines target position information.
+* Create a TargetPosSequenceStim instance, providing the TargetStim and
+  PositionGrid objects created, as well as the Triggers which should be used
+  to transition from one target position to another during the sequence of
+  target graphics presentation and the defined positions.
+* Use TargetPosSequenceStim.display() to run the full presentation procedure.
+* Use TargetPosSequenceStim.targetdata to access information about each target
+  position displayed and the events collected during the display duration for
+  each position.
+
+See demos/coder/iohub/eyetracking/validation.py for a complete example.
+"""
+from weakref import proxy
+import numpy as np
+from time import sleep
+import os
+import sys
+from PIL import Image
+from collections import OrderedDict
+
+from psychopy import visual, core
+from psychopy.iohub.util import win32MessagePump, normjoin
+from psychopy.iohub.constants import EventConstants
+from psychopy.iohub.client import ioHubConnection
+from psychopy.tools.monitorunittools import convertToPix
+from psychopy.tools.monitorunittools import pix2deg
+
+from posgrid import PositionGrid
+from trigger import Trigger
+
+getTime = core.getTime
+
+
+def toPix(win, x, y):
+    """Returns the stim's position in pixels,
+    based on its pos, units, and win.
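+
+    x and y are sequences of coordinate values in the window's current units.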
+ """ + xy = np.zeros((len(x), 2)) + xy[:, 0] = x + xy[:, 1] = y + r = convertToPix(np.asarray((0, 0)), xy, win.units, win) + return r[:, 0], r[:, 1] + + +def toDeg(win, x, y): + xy = np.zeros((len(x), 2)) + xy[:, 0] = x + xy[:, 1] = y + r = pix2deg(xy, win.monitor, correctFlat=False) + return r[:, 0], r[:, 1] + +class TargetPosSequenceStim(object): + TARGET_STATIONARY = 1 + TARGET_MOVING = 2 + TARGET_EXPANDING = 4 + TARGET_CONTRACTING = 8 + # Experiment Message text field types and tokens + message_types = dict(BEGIN_SEQUENCE=('BEGIN_SEQUENCE', '', int), + DONE_SEQUENCE=('DONE_SEQUENCE', '', int), + NEXT_POS_TRIG=('NEXT_POS_TRIG', '', int, float), + START_DRAW=('START_DRAW', ',', int, float, float, float, float), + SYNCTIME=('SYNCTIME', ',', int, float, float, float, float), + EXPAND_SIZE=('EXPAND_SIZE', '', float, float), + CONTRACT_SIZE=('CONTRACT_SIZE', '', float, float), + POS_UPDATE=('POS_UPDATE', ',', float, float), + TARGET_POS=('TARGET_POS', ',', float, float)) + max_msg_type_length = max([len(s) for s in message_types.keys()]) + binocular_sample_message_element = [('targ_pos_ix', np.int), + ('last_msg_time', np.float64), + ('last_msg_type', np.str, max_msg_type_length), + ('next_msg_time', np.float64), + ('next_msg_type', np.str, max_msg_type_length), + ('targ_pos_x', np.float64), + ('targ_pos_y', np.float64), + ('targ_state', np.int), + ('eye_time', np.float64), + ('eye_status', np.int), + ('left_eye_x', np.float64), + ('left_eye_y', np.float64), + ('left_pupil_size', np.float64), + ('right_eye_x', np.float64), + ('right_eye_y', np.float64), + ('right_pupil_size', np.float64)] + monocular_sample_message_element = [('targ_pos_ix', np.int), + ('last_msg_time', np.float64), + ('last_msg_type', np.str, max_msg_type_length), + ('next_msg_time', np.float64), + ('next_msg_type', np.str, max_msg_type_length), + ('targ_pos_x', np.float64), + ('targ_pos_y', np.float64), + ('targ_state', np.int), + ('eye_time', np.float64), + ('eye_status', np.int), + ('eye_x', np.float64), + ('eye_y', np.float64), + ('pupil_size', np.float64)] + + def __init__(self, win, target, positions, background=None, storeeventsfor=[], triggers=None, msgcategory='', + config=None, io=None, terminate_key='escape'): + """ + TargetPosSequenceStim combines an instance of a Target stim and an + instance of a PositionGrid to create everything needed to present the + target at each position returned by the PositionGrid instance within the + psychopy window used to create the Target stim. The target is presented at + each position sequentially. + + By providing keyword arguments to the TargetPosSequenceStim.display(...) + method, position animation between target positions, and target stim + expansion and / or contraction transitions are possible. + + psychopy.iohub.Trigger based classes are used to define the criteria used to + start displaying the next target position graphics. By providing a list + of a TimerTrigger and a set of DeviceEventTriggers, complex criteria + for target position pacing can be easily defined for use during the display + period. + + iohub devices can be provided in the storeeventsfor keyword argument. + Events which occur during each target position presentation period are + stored and are available at the end of the display() period, grouped by + position index and device event types. 
+ + :param win: + :param target: + :param positions: + :param background: + :param storeeventsfor: + :param triggers: + :param msgcategory: + :param config: + :param io: + """ + self.terminate_key = terminate_key + self._terminate_requested = False + self.win = proxy(win) + self.target = target + self.background = background + self.positions = positions + self.storeevents = storeeventsfor + self.msgcategory = msgcategory + + if io is None: + io = ioHubConnection.getActiveConnection() + self.io = io + + # If storeevents is True, targetdata will be a list of dict's. + # Each dict, among other things, contains all ioHub events that occurred + # from when a target was first presented at a position, to when the + # the wait period completed for that position. + # + self.targetdata = [] + self.triggers = triggers + + + + def getIO(self): + """ + Get the active ioHubConnection instance. + """ + return self.io + + def _draw(self): + """ + Fill the window with the specified background color and draw the + target stim. + """ + if self.background: + self.background.draw() + self.target.draw() + + def _animateTarget(self, topos, frompos, **kwargs): + """ + Any logic related to drawing the target at the new screen position, + including any intermediate animation effects, is done here. + + Return the flip time when the target was first drawn at the newpos + location. + """ + io = self.getIO() + if frompos is not None: + velocity = kwargs.get('velocity') + if velocity: + starttime = getTime() + a, b = np.abs(topos - frompos) ** 2 + duration = np.sqrt(a + b) / velocity + arrivetime = duration + starttime + fliptime = starttime + while fliptime < arrivetime: + mu = (fliptime - starttime) / duration + tpos = frompos * (1.0 - mu) + topos * mu + self.target.setPos(frompos * (1.0 - mu) + topos * mu) + self._draw() + fliptime = self.win.flip() + io.sendMessageEvent('POS_UPDATE %.4f,%.4f' % (tpos[0], tpos[1]), self.msgcategory, + sec_time=fliptime) + self._addDeviceEvents() + if self._terminate_requested: + return 0 + + self.target.setPos(topos) + self._draw() + fliptime = self.win.flip() + io.sendMessageEvent('TARGET_POS %.4f,%.4f' % (topos[0], topos[1]), self.msgcategory, sec_time=fliptime) + self._addDeviceEvents() + + expandedscale = kwargs.get('expandedscale') + expansionduration = kwargs.get('expansionduration') + contractionduration = kwargs.get('contractionduration') + + initialradius = self.target.radius + if expandedscale: + expandedradius = self.target.radius * expandedscale + + if expansionduration: + starttime = fliptime + expandedtime = fliptime + expansionduration + while fliptime < expandedtime: + mu = (fliptime - starttime) / expansionduration + cradius = initialradius * (1.0 - mu) + expandedradius * mu + self.target.setRadius(cradius) + self._draw() + fliptime = self.win.flip() + io.sendMessageEvent('EXPAND_SIZE %.4f %.4f' % (cradius, initialradius), self.msgcategory, + sec_time=fliptime) + self._addDeviceEvents() + if self._terminate_requested: + return 0 + if contractionduration: + starttime = fliptime + contractedtime = fliptime + contractionduration + while fliptime < contractedtime: + mu = (fliptime - starttime) / contractionduration + cradius = expandedradius * (1.0 - mu) + initialradius * mu + self.target.setRadius(cradius) + self._draw() + fliptime = self.win.flip() + io.sendMessageEvent('CONTRACT_SIZE %.4f %.4f' % (cradius, initialradius), self.msgcategory, + sec_time=fliptime) + self._addDeviceEvents() + if self._terminate_requested: + return 0 + + self.target.setRadius(initialradius) 
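+        # Restore the original radius so the next position starts from the
+        # default target size.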
+        return fliptime
+
+    def moveTo(self, topos, frompos, **kwargs):
+        """
+        Indicates that the target should be moved from frompos to topos.
+
+        If a PositionGrid has been provided, moveTo should not be called
+        directly. Instead, use the display() method to start the full
+        target position presentation sequence.
+        """
+        io = self.getIO()
+        fpx, fpy = -1, -1
+        if frompos is not None:
+            fpx, fpy = frompos[0], frompos[1]
+        io.sendMessageEvent('START_DRAW %d %.4f,%.4f %.4f,%.4f' % (self.positions.posIndex, fpx, fpy, topos[0],
+                                                                   topos[1]), self.msgcategory)
+        fliptime = self._animateTarget(topos, frompos, **kwargs)
+        io.sendMessageEvent('SYNCTIME %d %.4f,%.4f %.4f,%.4f' % (self.positions.posIndex, fpx, fpy, topos[0], topos[1]),
+                            self.msgcategory, sec_time=fliptime)
+
+        # wait for trigger to fire
+        last_pump_time = fliptime
+        trig_fired = self._hasTriggerFired(start_time=fliptime)
+        while not trig_fired:
+            if getTime() - last_pump_time >= 0.250:
+                win32MessagePump()
+                last_pump_time = getTime()
+            sleep(0.001)
+            if self._checkForTerminate():
+                return
+            trig_fired = self._hasTriggerFired(start_time=fliptime)
+
+    def _hasTriggerFired(self, **kwargs):
+        """
+        Used internally to know when one of the triggers has occurred and
+        the target should move to the next target position.
+        """
+        # wait for trigger to fire
+        triggered = None
+        for trig in self.triggers:
+            if trig.triggered(**kwargs):
+                triggered = trig
+                break
+        self._addDeviceEvents(trig.clearEventHistory(True))
+        if triggered:
+            # by default, assume it was a timer trigger, so use 255 as 'event type'
+            event_type_id = 255
+            trig_evt = triggered.getTriggeringEvent()
+            if hasattr(trig_evt, 'type'):
+                # actually it was a device event trigger
+                event_type_id = trig_evt.type
+            # get the time of the triggering event
+            event_time = triggered.getTriggeringTime()
+            self.getIO().sendMessageEvent('NEXT_POS_TRIG %d %.3f' % (event_type_id, event_time), self.msgcategory)
+            for trig in self.triggers:
+                trig.resetTrigger()
+        return triggered
+
+    def _initTargetData(self, frompos, topos):
+        """
+        Internally used to create the data structure used to store position
+        information and events which occurred during each target position
+        period.
+        """
+        if self.storeevents:
+            deviceevents = {}
+            for device in self.storeevents:
+                deviceevents[device] = []
+        self.targetdata.append(dict(frompos=frompos, topos=topos, events=deviceevents))
+
+    def _addDeviceEvents(self, device_event_dict={}):
+        if self._checkForTerminate():
+            return
+        dev_event_buffer = self.targetdata[-1]['events']
+        for dev, dev_events in dev_event_buffer.items():
+            if dev in device_event_dict:
+                dev_events.extend(device_event_dict[dev])
+            else:
+                dev_events.extend(dev.getEvents())
+
+    def _checkForTerminate(self):
+        self._terminate_requested = len(self.io.devices.keyboard.getPresses(keys=self.terminate_key, clear=False)) > 0
+        return self._terminate_requested
+
+    def display(self, **kwargs):
+        """
+        Display the target at each point in the position grid, performing
+        target animation if requested. The target then holds position until one
+        of the specified triggers occurs, resulting in the target moving to the
+        next position in the position grid.
+
+        To set up target animation between grid positions, the following keyword
+        arguments are supported. If an option is not specified, the animation
+        related to it is not performed.
+
+        velocity: The rate (units / second) at which the target should move
+                  from a current target position to the next target position.
+                  The value should be in the unit type the target stimulus
+                  is using.
+
+        expandedscale: When a target stimulus is at the current grid position,
+                       the target graphic can expand to a size equal to the
+                       original target radius * expandedscale.
+
+        expansionduration: If expandedscale has been specified, this option is
+                           used to set how long it should take for the target to
+                           reach the full expanded target size. Time is in sec.
+
+        contractionduration: If a target has been expanded, this option is used
+                             to specify how many seconds it should take for the
+                             target to contract back to the original target
+                             radius.
+
+        Note that target expansion and contraction change the target stimulus
+        outer diameter only. The edge thickness and central dot radius do not
+        change.
+
+        All movement and size changes are linear.
+
+        For example, to display a static target at each grid position::
+
+            targetsequence.display()
+
+        To have the target stim move between each grid position
+        at 400 pixels / sec and not expand or contract::
+
+            targetsequence.display(velocity=400.0)
+
+        If the target should jump from one grid position to the next, and then
+        expand to twice the radius over a 0.5 second period::
+
+            targetsequence.display(
+                expandedscale=2.0,
+                expansionduration=0.50
+            )
+
+        To do a similar animation as the previous example, but also have the
+        target contract back to its original size over 0.75 seconds::
+
+            targetsequence.display(
+                expandedscale=2.0,
+                expansionduration=0.50,
+                contractionduration=0.75
+            )
+
+        When this method returns, the target has been displayed at all
+        positions. Data collected for each position period can be accessed via
+        the targetdata attribute.
+        """
+        del self.targetdata[:]
+        prevpos = None
+
+        io = self.getIO()
+        io.clearEvents('all')
+        io.sendMessageEvent('BEGIN_SEQUENCE {0}'.format(len(self.positions.positions)), self.msgcategory)
+        turn_rec_off = []
+        for d in self.storeevents:
+            if not d.isReportingEvents():
+                d.enableEventReporting(True)
+                turn_rec_off.append(d)
+
+        sleep(0.025)
+        for pos in self.positions:
+            self._initTargetData(prevpos, pos)
+            self._addDeviceEvents()
+            if self._terminate_requested:
+                break
+            self.moveTo(pos, prevpos, **kwargs)
+            prevpos = pos
+            self._addDeviceEvents()
+            if self._terminate_requested:
+                break
+
+        for d in turn_rec_off:
+            d.enableEventReporting(False)
+
+        if self._terminate_requested:
+            io.sendMessageEvent('VALIDATION TERMINATED BY USER', self.msgcategory)
+            return False
+
+        io.sendMessageEvent('DONE_SEQUENCE {0}'.format(
+            len(self.positions.positions)), self.msgcategory)
+        sleep(0.025)
+        self._addDeviceEvents()
+        io.clearEvents('all')
+        return True
+
+    def _processMessageEvents(self):
+        self.target_pos_msgs = []
+        self.saved_pos_samples = []
+        for pd in self.targetdata:
+            frompos = pd.get('frompos')
+            topos = pd.get('topos')
+            events = pd.get('events')
+
+            # create a dict of device labels as keys, device events as value
+            devlabel_events = {}
+            for k, v in events.items():
+                devlabel_events[k.getName()] = v
+
+            samples = devlabel_events.get('tracker', [])
+            # remove any eyetracker events that are not samples
+            samples = [s for s in samples if s.type in (EventConstants.BINOCULAR_EYE_SAMPLE,
+                                                        EventConstants.MONOCULAR_EYE_SAMPLE)]
+            self.saved_pos_samples.append(samples)
+
+            self.sample_type = self.saved_pos_samples[0][0].type
+            if self.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE:
+                self.sample_msg_dtype = self.binocular_sample_message_element
+            else:
+                self.sample_msg_dtype = self.monocular_sample_message_element
+ messages = devlabel_events.get('experiment', []) + msg_lists = [] + for m in messages: + temp = m.text.strip().split() + msg_type = self.message_types.get(temp[0]) + if msg_type: + current_msg = [m.time, m.category] + if msg_type[1] == ',': + for t in temp: + current_msg.extend(t.split(',')) + else: + current_msg.extend(temp) + + for mi, dtype in enumerate(msg_type[2:]): + current_msg[mi + 3] = dtype(current_msg[mi + 3]) + + msg_lists.append(current_msg) + + if msg_lists[0][2] == 'NEXT_POS_TRIG': + # handle case where the trigger msg from the previous target + # message was not read until the start of the next pos. + # In which case, move msg to end of previous targ pos msgs + npm = msg_lists.pop(0) + self.target_pos_msgs[-1].append(npm) + + self.target_pos_msgs.append(msg_lists) + + for i in range(len(self.target_pos_msgs)): + self.target_pos_msgs[i] = np.asarray(self.target_pos_msgs[i], dtype=object) + + return self.target_pos_msgs + + def getSampleMessageData(self): + """ + Return a list of numpy ndarrays, each containing joined eye sample + and previous / next experiment message data for the sample's time. + """ + # preprocess message events + self._processMessageEvents() + + # inline func to return sample field array based on sample namedtup + def getSampleData(s): + sampledata = [s.time, s.status] + if self.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + sampledata.extend((s.left_gaze_x, s.left_gaze_y, s.left_pupil_measure1, + s.right_gaze_x, s.right_gaze_y, s.right_pupil_measure1)) + return sampledata + + sampledata.extend((s.gaze_x, s.gaze_y, s.pupil_measure1)) + return sampledata + + current_target_pos = -1.0, -1.0 + current_targ_state = 0 + target_pos_samples = [] + for pindex, samples in enumerate(self.saved_pos_samples): + last_msg, messages = self.target_pos_msgs[pindex][0], self.target_pos_msgs[pindex][1:] + samplesforposition = [] + pos_sample_count = len(samples) + si = 0 + for current_msg in messages: + last_msg_time = last_msg[0] + last_msg_type = last_msg[2] + if last_msg_type == 'START_DRAW': + if not current_targ_state & self.TARGET_STATIONARY: + current_targ_state += self.TARGET_STATIONARY + current_targ_state -= current_targ_state & self.TARGET_MOVING + current_targ_state -= current_targ_state & self.TARGET_EXPANDING + current_targ_state -= current_targ_state & self.TARGET_CONTRACTING + elif last_msg_type == 'EXPAND_SIZE': + if not current_targ_state & self.TARGET_EXPANDING: + current_targ_state += self.TARGET_EXPANDING + current_targ_state -= current_targ_state & self.TARGET_CONTRACTING + elif last_msg_type == 'CONTRACT_SIZE': + if not current_targ_state & self.TARGET_CONTRACTING: + current_targ_state += self.TARGET_CONTRACTING + current_targ_state -= current_targ_state & self.TARGET_EXPANDING + elif last_msg_type == 'TARGET_POS': + current_target_pos = float(last_msg[3]), float(last_msg[4]) + current_targ_state -= current_targ_state & self.TARGET_MOVING + if not current_targ_state & self.TARGET_STATIONARY: + current_targ_state += self.TARGET_STATIONARY + elif last_msg_type == 'POS_UPDATE': + current_target_pos = float(last_msg[3]), float(last_msg[4]) + if not current_targ_state & self.TARGET_MOVING: + current_targ_state += self.TARGET_MOVING + current_targ_state -= current_targ_state & self.TARGET_STATIONARY + elif last_msg_type == 'SYNCTIME': + if not current_targ_state & self.TARGET_STATIONARY: + current_targ_state += self.TARGET_STATIONARY + current_targ_state -= current_targ_state & self.TARGET_MOVING + current_targ_state -= current_targ_state & 
self.TARGET_EXPANDING
+                    current_targ_state -= current_targ_state & self.TARGET_CONTRACTING
+                    current_target_pos = float(last_msg[6]), float(last_msg[7])
+
+                while si < pos_sample_count:
+                    sample = samples[si]
+                    if sample.time >= last_msg_time and sample.time < current_msg[0]:
+                        sarray = [pindex, last_msg_time, last_msg_type,
+                                  current_msg[0], current_msg[2],
+                                  current_target_pos[0], current_target_pos[1],
+                                  current_targ_state]
+                        sarray.extend(getSampleData(sample))
+                        sndarray = np.asarray(tuple(sarray), dtype=self.sample_msg_dtype)
+                        samplesforposition.append(sndarray)
+                        si += 1
+                    elif sample.time >= current_msg[0]:
+                        break
+                    else:
+                        si += 1
+                last_msg = current_msg
+
+            possamples = np.asanyarray(samplesforposition)
+            target_pos_samples.append(possamples)
+
+        # So we now have a list len == number target positions. Each element
+        # of the list is a list of all eye sample / message data for a
+        # target position. Each element of the data list for a single target
+        # position is itself a list that contains combined info about
+        # an eye sample and the message info valid at that sample's time.
+        return np.asanyarray(target_pos_samples, dtype=object)
+
+
+class ValidationProcedure(object):
+    def __init__(self, win=None, target=None, positions=None, target_animation_params={}, randomize_positions=True,
+                 background=None, triggers=2.0, storeeventsfor=None, accuracy_period_start=0.350,
+                 accuracy_period_stop=.050, show_intro_screen=True, intro_text='Ready to Start Validation Procedure.',
+                 show_results_screen=True, results_in_degrees=False, save_figure_path=None,
+                 terminate_key="escape"):
+        """
+        ValidationProcedure can be used to check the accuracy of a calibrated
+        eye tracking system.
+
+        Once a ValidationProcedure class instance has been created, the display(**kwargs) method
+        can be called to run the validation process.
+
+        The validation process consists of the following stages:
+
+        1) Display an Introduction / Instruction screen. A key press is used to
+        start target presentation.
+        2) The validation target presentation sequence. Based on the Target and
+        PositionGrid objects provided when the ValidationProcedure was created,
+        a series of target positions are displayed. The progression from one
+        target position to the next is controlled by the triggers specified.
+        The target can simply jump from one position to the next, or optional
+        linear motion settings can be used to have the target move across the
+        screen from one point to the next. The Target graphic itself can also
+        be configured to expand or contract once it has reached a location
+        defined in the position grid.
+        3) During stage 2), data is collected from the devices being monitored by
+        iohub. Specifically eye tracker samples and experiment messages are
+        collected.
+        4) The data collected during the validation target sequence is used to
+        calculate accuracy information for each target position presented.
+        The raw data as well as the computed accuracy data are available via the
+        ValidationProcedure class. Calculated measures are provided separately
+        for each target position and include:
+
+            a) An array of the samples used for the accuracy calculation. The
+            samples used are selected using the following criteria:
+                i) Only samples where the target was stationary and
+                not expanding or contracting are selected.
+
+                ii) Samples are selected that fall between:
+
+                    start_time_filter = last_sample_time - accuracy_period_start
+
+                    and
+
+                    end_time_filter = last_sample_time - accuracy_period_stop
+
+                    Therefore, the duration of the selected sample period is:
+
+                    selection_period_dur = end_time_filter - start_time_filter
+
+                iii) Samples that contain missing / invalid position data
+                    are then removed, providing the final set of samples
+                    used for accuracy calculations. The min, max, and mean
+                    values from each set of selected samples are calculated.
+
+            b) The x and y error of each sample's gaze position relative to the
+            current target position. This data is in the same units as is
+            used by the Target instance. Computations are done for each eye
+            being recorded. The values are signed floats.
+
+            c) The xy distance error from each eye's gaze position to
+            the target position. This is also calculated as an average of
+            both eyes when binocular data is available. The data is unsigned,
+            providing the absolute distance from gaze to target positions.
+
+        5) A 2D plot is created displaying each target position and the position of
+        each sample used for the accuracy calculation. The minimum, maximum, and
+        average error is displayed for all target positions. A key press is used
+        to remove the validation results plot, and control is returned to the
+        script that started the validation display. Note that the plot is also
+        saved as a png file in the same directory as the calling script.
+
+        See the validation.py demo in demos.coder.iohub.eyetracker for example usage.
+
+        :param win: PsychoPy Window used for the validation procedure.
+        :param target: Target stim instance (e.g. TargetStim) to display.
+        :param positions: PositionGrid instance, or list of (x, y) target positions.
+        :param target_animation_params: dict of target animation settings (velocity, etc.).
+        :param randomize_positions: If True, randomize the target position order.
+        :param background: Optional background stim drawn behind the target.
+        :param triggers: Trigger(s) used to progress to the next target position.
+        :param storeeventsfor: List of iohub devices to store events for.
+        :param accuracy_period_start: Start of the accuracy sample selection period,
+                                      in sec before the last sample of a position.
+        :param accuracy_period_stop: End of the accuracy sample selection period,
+                                     in sec before the last sample of a position.
+        :param show_intro_screen: If True, show an introduction screen before starting.
+        :param intro_text: Text displayed on the introduction screen.
+        :param show_results_screen: If True, show a results plot when validation is done.
+        :param results_in_degrees: If True, report results in degrees of visual angle.
+        :param save_figure_path: Folder to save the results plot to.
+        :param terminate_key: Key used to abort the validation procedure.
+        """
+        self.terminate_key = terminate_key
+        self.io = ioHubConnection.getActiveConnection()
+
+        if isinstance(positions, (list, tuple)):
+            positions = PositionGrid(posList=positions, firstposindex=0, repeatFirstPos=False)
+
+        self.positions = positions
+
+        self.randomize_positions = randomize_positions
+        if self.randomize_positions:
+            self.positions.randomize()
+        self.win = proxy(win)
+        if target_animation_params is None:
+            target_animation_params = {}
+        self.animation_params = target_animation_params
+        self.accuracy_period_start = accuracy_period_start
+        self.accuracy_period_stop = accuracy_period_stop
+        self.show_intro_screen = show_intro_screen
+        self.intro_text = intro_text
+        self.show_results_screen = show_results_screen
+        self.results_in_degrees = results_in_degrees
+        self.save_figure_path = save_figure_path
+        self.validation_results = None
+        if storeeventsfor is None:
+            storeeventsfor = [self.io.devices.keyboard,
+                              self.io.devices.mouse,
+                              self.io.devices.tracker,
+                              self.io.devices.experiment
+                              ]
+        # Create the TargetPosSequenceStim instance; used to control the sequential
+        # presentation of the target at each of the grid positions.
+ triggers = Trigger.getTriggersFrom(triggers) + self.targetsequence = TargetPosSequenceStim(win, target=target, positions=self.positions, background=background, + triggers=triggers, storeeventsfor=storeeventsfor, + terminate_key=terminate_key) + # Stim for results screen + self.imagestim = None + self.textstim = None + self.use_dpi = 90 + + def run(self): + """ + Run the validation procedure, returning after the full validation process is complete, including: + a) display of an instruction screen + b) display of the target position sequence used for validation data collection. + c) display of a validation accuracy results plot. + """ + if self.show_intro_screen: + # Display Validation Intro Screen + self.showIntroScreen() + if self.terminate_key and self.terminate_key in self.io.devices.keyboard.waitForReleases(keys=None): + print("Escape key pressed. Exiting validation") + self.validation_results = None + return + + # Perform Validation..... + terminate = not self.targetsequence.display(**self.animation_params) + if terminate: + print("Escape key pressed. Exiting validation") + self.validation_results = None + return + + self.io.clearEvents('all') + + self._createValidationResults() + + if self.show_results_screen: + if self.showResultsScreen() is not None: + if self.terminate_key and self.terminate_key in self.io.devices.keyboard.waitForReleases(keys=None): + print("Escape key pressed. Exiting validation") + self.validation_results = None + return + return self.validation_results + + def showResultsScreen(self): + self._buildResultScreen() + if self.imagestim: + self.imagestim.draw() + self.textstim.draw() + return self.win.flip() + + def showIntroScreen(self): + text = self.intro_text + '\nPress SPACE to Start....' + textpos = (0, 0) + if self.textstim: + self.textstim.setText(text) + self.textstim.setPos(textpos) + else: + self.textstim = visual.TextStim(self.win, text=text, pos=textpos, height=30, color=(0, 0, 0), + colorSpace='rgb255', opacity=1.0, contrast=1.0, units='pix', + ori=0.0, antialias=True, bold=False, italic=False, anchorHoriz='center', + anchorVert='center', wrapWidth=self.win.size[0] * .8) + + self.textstim.draw() + return self.win.flip() + + def getValidationResults(self): + return self.validation_results + + def _createValidationResults(self): + """ + Create validation results dict and save validation analysis info as experiment messages to + the iohub .hdf5 file. 
+ + :return: dict + """ + self.validation_results = None + sample_array = self.targetsequence.getSampleMessageData() + + if self.results_in_degrees: + for postdat in sample_array: + postdat['targ_pos_x'], postdat['targ_pos_y'] = toDeg(self.win, *toPix(self.win, postdat['targ_pos_x'], postdat['targ_pos_y'])) + + if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + postdat['left_eye_x'], postdat['left_eye_y'] = toDeg(self.win, *toPix(self.win, postdat['left_eye_x'], + postdat['left_eye_y'])) + + postdat['right_eye_x'], postdat['right_eye_y'] = toDeg(self.win, *toPix(self.win, postdat['right_eye_x'], + postdat['right_eye_y'])) + else: + postdat['eye_x'], postdat['eye_y'] = toDeg(self.win, *toPix(self.win, postdat['eye_x'], postdat['eye_y'])) + + min_error = 100000.0 + max_error = 0.0 + summed_error = 0.0 + point_count = 0 + + self.io.sendMessageEvent('Results', 'VALIDATION') + results = dict(display_units=self.win.units, display_bounds=self.positions.bounds, + display_pix=self.win.size, position_count=len(sample_array), + target_positions=self.targetsequence.positions.getPositions()) + + for k, v in results.items(): + self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') + + results['position_results'] = [] + results['positions_failed_processing'] = 0 + + for pindex, samplesforpos in enumerate(sample_array): + self.io.sendMessageEvent('Target Position Results: {0}'.format(pindex), 'VALIDATION') + + stationary_samples = samplesforpos[samplesforpos['targ_state'] == self.targetsequence.TARGET_STATIONARY] + + last_stime = stationary_samples[-1]['eye_time'] + first_stime = stationary_samples[0]['eye_time'] + + filter_stime = last_stime - self.accuracy_period_start + filter_etime = last_stime - self.accuracy_period_stop + + all_samples_for_accuracy_calc = stationary_samples[stationary_samples['eye_time'] >= filter_stime] + all_samples_for_accuracy_calc = all_samples_for_accuracy_calc[all_samples_for_accuracy_calc['eye_time'] < filter_etime] + + good_samples_for_accuracy_calc = all_samples_for_accuracy_calc[all_samples_for_accuracy_calc['eye_status'] <= 1] + + all_samples_for_accuracy_count = all_samples_for_accuracy_calc.shape[0] + good_accuracy_sample_count = good_samples_for_accuracy_calc.shape[0] + accuracy_calc_good_sample_perc = good_accuracy_sample_count / float(all_samples_for_accuracy_count) + + # Ordered dictionary of the different levels of samples selected during filtering + # for valid samples to use in accuracy calculations. + sample_msg_data_filtering = OrderedDict(all_samples=samplesforpos, # All samples from target period. + # Sample during stationary period at end of target + # presentation display. + stationary_samples=stationary_samples, + # Samples that occurred within the + # defined time selection period. 
+ time_filtered_samples=all_samples_for_accuracy_calc, + # Samples from the selection period that + # do not have missing data + used_samples=good_samples_for_accuracy_calc) + + position_results = dict(pos_index=pindex, + sample_time_range=[first_stime, last_stime], + filter_samples_time_range=[filter_stime, filter_etime], + valid_filtered_sample_perc=accuracy_calc_good_sample_perc) + + for k, v in position_results.items(): + self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') + + position_results['sample_from_filter_stages'] = sample_msg_data_filtering + + if accuracy_calc_good_sample_perc == 0.0: + position_results['calculation_status'] = 'FAILED' + results['positions_failed_processing'] += 1 + else: + target_x = good_samples_for_accuracy_calc[:]['targ_pos_x'] + target_y = good_samples_for_accuracy_calc[:]['targ_pos_y'] + + if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + left_x = good_samples_for_accuracy_calc[:]['left_eye_x'] + left_y = good_samples_for_accuracy_calc[:]['left_eye_y'] + left_error_x = target_x - left_x + left_error_y = target_y - left_y + left_error_xy = np.hypot(left_error_x, left_error_y) + + right_x = good_samples_for_accuracy_calc[:]['right_eye_x'] + right_y = good_samples_for_accuracy_calc[:]['right_eye_y'] + right_error_x = target_x - right_x + right_error_y = target_y - right_y + right_error_xy = np.hypot(right_error_x, right_error_y) + + lr_error = (right_error_xy + left_error_xy) / 2.0 + lr_error_max = lr_error.max() + lr_error_min = lr_error.min() + lr_error_mean = lr_error.mean() + lr_error_std = np.std(lr_error) + min_error = min(min_error, lr_error_min) + max_error = max(max_error, lr_error_max) + summed_error += lr_error_mean + point_count += 1.0 + else: + eye_x = good_samples_for_accuracy_calc[:]['eye_x'] + eye_y = good_samples_for_accuracy_calc[:]['eye_y'] + error_x = target_x - eye_x + error_y = target_y - eye_y + error_xy = np.hypot(error_x, error_y) + lr_error = error_xy + lr_error_max = lr_error.max() + lr_error_min = lr_error.min() + lr_error_mean = lr_error.mean() + lr_error_std = np.std(lr_error) + min_error = min(min_error, lr_error_min) + max_error = max(max_error, lr_error_max) + summed_error += lr_error_mean + point_count += 1.0 + + position_results2 = dict() + position_results2['calculation_status'] = 'PASSED' + position_results2['target_position'] = (target_x[0], target_y[0]) + position_results2['min_error'] = lr_error_min + position_results2['max_error'] = lr_error_max + position_results2['mean_error'] = lr_error_mean + position_results2['stdev_error'] = lr_error_std + for k, v in position_results2.items(): + self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') + position_results[k] = v + results['position_results'].append(position_results) + self.io.sendMessageEvent('Done Target Position Results : {0}'.format(pindex), 'VALIDATION') + + unit_type = self.win.units + if self.results_in_degrees: + unit_type = 'degrees' + mean_error = summed_error / point_count + err_results = dict(reporting_unit_type=unit_type, min_error=min_error, max_error=max_error, + mean_error=mean_error) + + for k, v in err_results.items(): + self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') + results[k] = v + + self.io.sendMessageEvent('Validation Report Complete', 'VALIDATION') + + self.validation_results = results + return self.validation_results + + def createPlot(self): + """ + Creates a matplotlib figure of validation results. 
+        :return: The matplotlib figure instance.
+        """
+        from matplotlib import pyplot as pl
+
+        results = self.getValidationResults()
+        if results is None:
+            raise RuntimeError("Validation must be run before creating results plot.")
+
+        pixw, pixh = results['display_pix']
+
+        pl.clf()
+        fig = pl.gcf()
+        fig.set_size_inches((pixw * .9) / self.use_dpi, (pixh * .8) / self.use_dpi)
+        cm = pl.cm.get_cmap('RdYlBu')
+
+        for position_results in results['position_results']:
+            pindex = position_results['pos_index']
+            if position_results['calculation_status'] == 'FAILED':
+                # Draw nothing for failed position
+                # TODO: Draw something. ;)
+                pass
+            else:
+                samples = position_results['sample_from_filter_stages']['used_samples']
+                time = samples[:]['eye_time']
+                target_x = samples[:]['targ_pos_x']
+                target_y = samples[:]['targ_pos_y']
+                if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE:
+                    gaze_x = (samples[:]['left_eye_x'] + samples[:]['right_eye_x']) / 2.0
+                    gaze_y = (samples[:]['left_eye_y'] + samples[:]['right_eye_y']) / 2.0
+                else:
+                    gaze_x = samples[:]['eye_x']
+                    gaze_y = samples[:]['eye_y']
+
+                normed_time = (time - time.min()) / (time.max() - time.min())
+                pl.scatter(target_x[0], target_y[0], s=400, color=[0.75, 0.75, 0.75], alpha=0.5)
+                pl.text(target_x[0], target_y[0], str(pindex), size=11, horizontalalignment='center',
+                        verticalalignment='center')
+                pl.scatter(gaze_x, gaze_y, s=40, c=normed_time, cmap=cm, alpha=0.75)
+
+        if self.results_in_degrees:
+            l, b = toDeg(self.win, (-pixw / 2,), (-pixh / 2, ))
+            r, t = toDeg(self.win, (pixw / 2, ), (pixh / 2, ))
+        else:
+            l, t, r, b = results['display_bounds']
+
+        pl.xlim(l, r)
+        pl.ylim(b, t)
+        pl.xlabel('Horizontal Position (%s)' % (results['reporting_unit_type']))
+        pl.ylabel('Vertical Position (%s)' % (results['reporting_unit_type']))
+        pl.title('Validation Accuracy (%s)\nMin: %.4f, Max: %.4f, Mean: %.4f' % (results['reporting_unit_type'],
+                                                                                 results['min_error'],
+                                                                                 results['max_error'],
+                                                                                 results['mean_error']))
+
+        # pl.colorbar()
+        fig.tight_layout()
+        return fig
+
+    def _generateImageName(self):
+        import datetime
+        file_name = 'validation_' + datetime.datetime.now().strftime('%d_%m_%Y_%H_%M') + '.png'
+        if self.save_figure_path:
+            return normjoin(self.save_figure_path, file_name)
+        rootScriptPath = os.path.dirname(sys.argv[0])
+        return normjoin(rootScriptPath, file_name)
+
+    def _buildResultScreen(self, replot=False):
+        """
+        Build validation results screen.
+        Currently saves the plot from .createPlot() to disk and then loads that as an image.
+        :param replot:
+        :return:
+        """
+        if replot or self.imagestim is None:
+            iname = self._generateImageName()
+            self.createPlot().savefig(iname, dpi=self.use_dpi)
+
+            text_pos = (0, 0)
+            text = 'Accuracy Calculation not Possible due to Analysis Error. Press SPACE to continue.'
+
+            if iname:
+                fig_image = Image.open(iname)
+
+                if self.imagestim:
+                    self.imagestim.setImage(fig_image)
+                else:
+                    self.imagestim = visual.ImageStim(self.win, image=fig_image, units='pix', pos=(0.0, 0.0))
+
+                text = 'Press SPACE to continue.'
+ text_pos = (0.0, -(self.win.size[1] / 2.0) * .9) + else: + self.imagestim = None + + if self.textstim is None: + self.textstim = visual.TextStim(self.win, text=text, pos=text_pos, color=(0, 0, 0), colorSpace='rgb255', + opacity=1.0, contrast=1.0, units='pix', ori=0.0, height=None, + antialias=True, bold=False, italic=False, anchorVert='center', + anchorHoriz='center', wrapWidth=self.win.size[0] * .8) + else: + self.textstim.setText(text) + self.textstim.setPos(text_pos) + + elif self.imagestim: + return True + return False + +from psychopy.iohub.util.visualangle import VisualAngleCalc diff --git a/psychopy/iohub/devices/display/__init__.py b/psychopy/iohub/devices/display/__init__.py index e27b390c162..3272db982a1 100644 --- a/psychopy/iohub/devices/display/__init__.py +++ b/psychopy/iohub/devices/display/__init__.py @@ -231,8 +231,7 @@ def getBounds(self): def getCoordBounds(self): """Get the Display's left, top, right, and bottom border bounds, - specified in the coordinate space returned by - Display.getCoordinateType() + specified in the coordinate space returned by Display.getCoordinateType() Args: None From 92877a53a18e09e4c7f5bfbc12678e225ffd6e73 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Wed, 31 Mar 2021 14:17:11 -0300 Subject: [PATCH 05/26] FF: Added gaze_cursor_key option to eye tracker validation Default key of 'g' toggles gaze cursor (green gauss) visibility during validation procedure. --- .../iohub/eyetracking/validation/posgrid.py | 3 +- .../coder/iohub/eyetracking/validation/run.py | 187 ++++++------------ .../iohub/eyetracking/validation/trigger.py | 3 + .../validation/validationroutine.py | 67 +++++-- 4 files changed, 125 insertions(+), 135 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/posgrid.py b/psychopy/demos/coder/iohub/eyetracking/validation/posgrid.py index 1f5d2b9e07f..b55fdd6fd53 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/posgrid.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation/posgrid.py @@ -87,8 +87,7 @@ def __init__(self, self.bounds = bounds if self.bounds is None: self.bounds = ioHubConnection.getActiveConnection().devices.display.getCoordBounds() - print("iohub display units:", ioHubConnection.getActiveConnection().devices.display.getCoordinateType()) - print("Bounds: ", self.bounds) + winSize = self.bounds[2]-self.bounds[0], self.bounds[3]-self.bounds[1] self.firstposindex = firstposindex diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/run.py b/psychopy/demos/coder/iohub/eyetracking/validation/run.py index 87f983ba283..8c66f6c116f 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/run.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation/run.py @@ -5,45 +5,31 @@ and the ValidationProcedure utility class. """ import time -from weakref import proxy - from psychopy import visual from psychopy.iohub import launchHubServer -from psychopy.iohub.constants import EventConstants -from trigger import TimeTrigger, DeviceEventTrigger -from posgrid import PositionGrid +from trigger import TimeTrigger, KeyboardTrigger from validationroutine import ValidationProcedure class TargetStim(object): - def __init__(self, - win, - radius=None, # The outer radius of the target. - fillcolor=None, # The color used to fill the target body. - edgecolor=None, # The color for the edge around the target. - edgewidth=None, # The thickness of the target outer edge. - dotcolor=None, # The color of the central target dot. - dotradius=None, # The radius to use for the target dot. 
-                 units=None,  # The psychopy unit type of any size values.
-                 colorspace=None,  # The psychopy color space of any colors.
-                 opacity=1.0,  # The transparency of the target (0.0 - 1.0)
-                 contrast=1.0  # The contrast of the target stim.
-                 ):
+    def __init__(self, win, radius=None, fillcolor=None, edgecolor=None, edgewidth=None,
+                 dotcolor=None, dotradius=None, units=None, colorspace=None, opacity=1.0, contrast=1.0):
         """
         TargetStim is a 'doughnut' style target graphic used during the validation procedure.
 
-        :param win:
-        :param radius:
-        :param fillcolor:
-        :param edgecolor:
-        :param edgewidth:
-        :param dotcolor:
-        :param dotradius:
-        :param units:
-        :param colorspace:
-        :param opacity:
-        :param contrast:
+        :param win: Window being used for validation.
+        :param radius: The outer radius of the target.
+        :param fillcolor: The color used to fill the target body.
+        :param edgecolor: The color for the edge around the target.
+        :param edgewidth: The thickness of the target outer edge (always in pixels).
+        :param dotcolor: The color of the central target dot.
+        :param dotradius: The radius to use for the target dot.
+        :param units: The psychopy unit type of any size values.
+        :param colorspace: The psychopy color space of any colors.
+        :param opacity: The transparency of the target (0.0 - 1.0).
+        :param contrast: The contrast of the target stim.
         """
+        from weakref import proxy
         self.win = proxy(win)
         self.stim = []
         self.radius = radius
@@ -87,24 +73,68 @@ def contains(self, p):
         return self.stim[0].contains(p)
 
 
+def runValidation(win):
+    """
+    Runs the eye tracker validation procedure using PsychoPy Window win.
+    This function performs a ValidationProcedure using a validation target
+    stimulus, a validation position list, and the triggers used to determine
+    target position progression during the validation procedure.
+
+    :param win: PsychoPy window being used for validation.
+    :return:
+    """
+    # Create a TargetStim instance
+    target = TargetStim(win, radius=0.025, fillcolor=[.5, .5, .5], edgecolor=[-1, -1, -1], edgewidth=2,
+                        dotcolor=[1, -1, -1], dotradius=0.005, units='norm', colorspace='rgb')
+
+    positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85),
+                 (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)]
+
+    # Specify the Triggers to use to move from target point to point during
+    # the validation sequence....
+    target_triggers = KeyboardTrigger(' ')  #TimeTrigger(start_time=None, delay=2.5),
+
+    # Create a validation procedure
+    validation_proc = ValidationProcedure(win, target, positions,
+                                          target_animation_params=dict(velocity=1.0,
+                                                                       expandedscale=3.0,
+                                                                       expansionduration=0.2,
+                                                                       contractionduration=0.4),
+                                          background=None,
+                                          triggers=target_triggers,
+                                          storeeventsfor=None,
+                                          accuracy_period_start=0.550,
+                                          accuracy_period_stop=.150,
+                                          show_intro_screen=True,
+                                          intro_text='Validation procedure is now going to be performed.',
+                                          show_results_screen=True,
+                                          results_in_degrees=False,
+                                          randomize_positions=False,
+                                          toggle_gaze_cursor_key='g'
+                                          )
+
+    # Run the validation procedure. The run() method does not return until
+    # the validation is complete.
+    return validation_proc.run()
+
+
 if __name__ == "__main__":
     # Create a default PsychoPy Window
     win = visual.Window((1920, 1080), fullscr=True, allowGUI=False, monitor='55w_60dist')
 
-    exp_code = 'targetdisplay'
+    exp_code = 'validation_demo'
     sess_code = 'S_{0}'.format(int(time.mktime(time.localtime())))
 
     # Create ioHub Server config settings....
iohub_config = dict() iohub_config['experiment_code'] = exp_code iohub_config['session_code'] = sess_code - # Add an eye tracker device et_interface_name = 'eyetracker.hw.mouse.EyeTracker' iohub_config[et_interface_name] = dict(name='tracker') - # Start ioHub event monitoring process.... + # Start the ioHub process. io = launchHubServer(window=win, **iohub_config) # Get the keyboard and mouse devices for future access. @@ -112,97 +142,10 @@ def contains(self, p): tracker = io.devices.tracker experiment = io.devices.experiment - # run eyetracker calibration + # Run eyetracker calibration r = tracker.runSetupProcedure() - - # Create a TargetStim instance - target = TargetStim(win, - radius=0.025, - fillcolor=[.5, .5, .5], - edgecolor=[-1, -1, -1], - edgewidth=2, - dotcolor=[1, -1, -1], - dotradius=0.005, - units='norm', - colorspace='rgb' - ) - - # Create a PositionGrid instance that will hold the locations to display the - # target at. The example lists all possible keyword arguments that are - # supported. If bounds is None, the ioHub Display device is used - # to get the bounding box to be used. - #positions = PositionGrid(bounds=None, # bounding rect of the window, in window unit coords. - # shape=3, # Create a grid with 3 cols * 3 rows. - # posCount=None, - # leftMargin=None, - # rightMargin=None, - # topMargin=None, - # bottomMargin=None, - # scale=0.85, # Equally space the 3x3 grid across 85% - # of the window width and height. - # posList=None, - # noiseStd=None, - # firstposindex=4, # Use the center position grid - # location as the first point in - # the position order. - # repeatFirstPos=True # Redisplay first target position - # as the last target position. - # ) - # randomize the grid position presentation order (not including - # the first position). - #positions.randomize() - #print("positions: ", [(p[0], p[1]) for p in positions.getPositions()]) - - positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), - (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] - - - # Specifiy the Triggers to use to move from target point to point during - # the validation sequence.... - - # Use DeviceEventTrigger to create a keyboard event trigger - # which will fire when the space key is pressed. - kb_trigger = DeviceEventTrigger(io.getDevice('keyboard'), - event_type=EventConstants.KEYBOARD_RELEASE, - event_attribute_conditions={'key': ' '}, - repeat_count=0) - - # Creating a list of Trigger instances. The first one that - # fires will cause the start of the next target position - # presentation. - multi_trigger = (TimeTrigger(start_time=None, delay=2.5), kb_trigger) - - - # define a dict containing any animation params to be used, - # None's to disable animation - targ_anim_param = dict(velocity=1.0, # 800.0, - expandedscale=3.0, # 2.0, - expansionduration=0.2, # 0.1, - contractionduration=0.4) # 0.1 - print(win.units) - print(target.stim[0].units) - # Create a validation procedure - vin_txt = 'Validation procedure is now going to be performed.' - validation_proc = ValidationProcedure(win, target, positions, - target_animation_params=targ_anim_param, - background=None, - triggers=multi_trigger, #kb_trigger,#multi_trigger, - storeeventsfor=None, - accuracy_period_start=0.550, - accuracy_period_stop=.150, - show_intro_screen=True, - intro_text=vin_txt, - show_results_screen=True, - results_in_degrees=False, - randomize_positions=False) - - # Run the validation procedure. The run() method does not return until - # the validation is complete. 
The calculated validation results, and data - # collected for the analysis, are returned. - results = validation_proc.run() - - # The last run validation results can also be retrieved using: - # results = validation_proc.getValidationResults() + # Run eye tracker validation + validation_results = runValidation(win) io.quit() diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/trigger.py b/psychopy/demos/coder/iohub/eyetracking/validation/trigger.py index 2346a0133a1..a2a845cffef 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/trigger.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation/trigger.py @@ -102,6 +102,7 @@ class TimeTrigger(Trigger): (that takes no parameters). """ def __init__(self, start_time, delay, repeat_count=0, trigger_function=lambda a, b, c: True, user_kwargs={}): + Trigger.io = ioHubConnection.getActiveConnection() Trigger.__init__(self, trigger_function, user_kwargs, repeat_count) self._start_time = start_time @@ -158,6 +159,7 @@ class DeviceEventTrigger(Trigger): _lastEventsByDevice = dict() def __init__(self, device, event_type, event_attribute_conditions={}, repeat_count=-1, trigger_function=lambda a, b, c: True, user_kwargs={}): + Trigger.io = ioHubConnection.getActiveConnection() Trigger.__init__(self, trigger_function, user_kwargs, repeat_count) self.device = device self.event_type = event_type @@ -222,6 +224,7 @@ def resetLastTriggeredInfo(self): class KeyboardTrigger(DeviceEventTrigger): def __init__(self, key, on_press=False): + Trigger.io = ioHubConnection.getActiveConnection() if on_press: etype = EventConstants.KEYBOARD_PRESS else: diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py b/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py index a3faebcd640..6b383d4c280 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py @@ -29,7 +29,7 @@ from psychopy.iohub.constants import EventConstants from psychopy.iohub.client import ioHubConnection from psychopy.tools.monitorunittools import convertToPix -from psychopy.tools.monitorunittools import pix2deg +from psychopy.tools.monitorunittools import pix2deg, deg2pix from posgrid import PositionGrid from trigger import Trigger @@ -41,7 +41,11 @@ def toPix(win, x, y): """Returns the stim's position in pixels, based on its pos, units, and win. 
""" - xy = np.zeros((len(x), 2)) + try: + xy = np.zeros((len(x), 2)) + except TypeError: + xy = np.zeros((1, 2)) + xy[:, 0] = x xy[:, 1] = y r = convertToPix(np.asarray((0, 0)), xy, win.units, win) @@ -49,7 +53,10 @@ def toPix(win, x, y): def toDeg(win, x, y): - xy = np.zeros((len(x), 2)) + try: + xy = np.zeros((len(x), 2)) + except TypeError: + xy = np.zeros((1, 2)) xy[:, 0] = x xy[:, 1] = y r = pix2deg(xy, win.monitor, correctFlat=False) @@ -102,7 +109,7 @@ class TargetPosSequenceStim(object): ('pupil_size', np.float64)] def __init__(self, win, target, positions, background=None, storeeventsfor=[], triggers=None, msgcategory='', - config=None, io=None, terminate_key='escape'): + config=None, io=None, terminate_key='escape', gaze_cursor_key='g'): """ TargetPosSequenceStim combines an instance of a Target stim and an instance of a PositionGrid to create everything needed to present the @@ -136,6 +143,11 @@ def __init__(self, win, target, positions, background=None, storeeventsfor=[], t :param io: """ self.terminate_key = terminate_key + self.gaze_cursor_key = gaze_cursor_key + self.display_gaze = False + gc_size = deg2pix(3.0, win.monitor, correctFlat=False) + self.gaze_cursor = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0), size=(gc_size, gc_size), + color='green', units='pix', opacity=0.8) self._terminate_requested = False self.win = proxy(win) self.target = target @@ -172,6 +184,14 @@ def _draw(self): if self.background: self.background.draw() self.target.draw() + if self.display_gaze: + gpos = self.io.devices.tracker.getLastGazePosition() + valid_gaze_pos = isinstance(gpos, (tuple, list)) + if valid_gaze_pos: + pix_pos = toPix(self.win, *gpos) + pix_pos = pix_pos[0][0], pix_pos[1][0] + self.gaze_cursor.setPos(pix_pos) + self.gaze_cursor.draw() def _animateTarget(self, topos, frompos, **kwargs): """ @@ -273,9 +293,16 @@ def moveTo(self, topos, frompos, **kwargs): if getTime() - last_pump_time >= 0.250: win32MessagePump() last_pump_time = getTime() - sleep(0.001) + + if self.display_gaze: + self._draw() + self.win.flip() + else: + sleep(0.001) + if self._checkForTerminate(): return + self._checkForToggleGaze() trig_fired = self._hasTriggerFired(start_time=fliptime) def _hasTriggerFired(self, **kwargs): @@ -319,6 +346,7 @@ def _initTargetData(self, frompos, topos): def _addDeviceEvents(self, device_event_dict={}): if self._checkForTerminate(): return + self._checkForToggleGaze() dev_event_buffer = self.targetdata[-1]['events'] for dev, dev_events in dev_event_buffer.items(): if dev in device_event_dict: @@ -327,9 +355,25 @@ def _addDeviceEvents(self, device_event_dict={}): dev_events.extend(dev.getEvents()) def _checkForTerminate(self): - self._terminate_requested = len(self.io.devices.keyboard.getPresses(keys=self.terminate_key, clear=False)) > 0 + keys = self.io.devices.keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) + for k in keys: + if k.key == self.terminate_key: + self._terminate_requested = True + break return self._terminate_requested + def _checkForToggleGaze(self): + keys = self.io.devices.keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) + for k in keys: + if k.key == self.gaze_cursor_key: + # get (clear) the event so it does not trigger multiple times. 
+ self.io.devices.keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=True) + self.display_gaze = not self.display_gaze + self._draw() + self.win.flip() + return self.display_gaze + return self.display_gaze + def display(self, **kwargs): """ Display the target at each point in the position grid, performing @@ -426,8 +470,7 @@ def display(self, **kwargs): io.sendMessageEvent('VALIDATION TERMINATED BY USER', self.msgcategory) return False - io.sendMessageEvent('DONE_SEQUENCE {0}'.format( - len(self.positions.positions)), self.msgcategory) + io.sendMessageEvent('DONE_SEQUENCE {0}'.format( len(self.positions.positions)), self.msgcategory) sleep(0.025) self._addDeviceEvents() io.clearEvents('all') @@ -585,7 +628,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation_param background=None, triggers=2.0, storeeventsfor=None, accuracy_period_start=0.350, accuracy_period_stop=.050, show_intro_screen=True, intro_text='Ready to Start Validation Procedure.', show_results_screen=True, results_in_degrees=False, save_figure_path=None, - terminate_key="escape"): + terminate_key="escape", toggle_gaze_cursor_key="g"): """ ValidationProcedure can be used to check the accuracy of a calibrated eye tracking system. @@ -672,13 +715,15 @@ def __init__(self, win=None, target=None, positions=None, target_animation_param :param results_in_degrees: :param save_figure_path: :param terminate_key: + :param toggle_gaze_cursor_key: """ self.terminate_key = terminate_key + self.toggle_gaze_cursor_key = toggle_gaze_cursor_key + self.io = ioHubConnection.getActiveConnection() if isinstance(positions, (list, tuple)): positions = PositionGrid(posList=positions, firstposindex=0, repeatFirstPos=False) - self.positions = positions self.randomize_positions = randomize_positions @@ -707,7 +752,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation_param triggers = Trigger.getTriggersFrom(triggers) self.targetsequence = TargetPosSequenceStim(win, target=target, positions=self.positions, background=background, triggers=triggers, storeeventsfor=storeeventsfor, - terminate_key=terminate_key) + terminate_key=terminate_key, gaze_cursor_key=toggle_gaze_cursor_key) # Stim for results screen self.imagestim = None self.textstim = None From f16048c33c1e065c6daa2ff2320f5c3090ef8a3a Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Wed, 31 Mar 2021 14:28:43 -0300 Subject: [PATCH 06/26] FF: validation intro and results screen triggers Only continues on space key press now instead of any key press. --- .../demos/coder/iohub/eyetracking/validation/run.py | 2 +- .../eyetracking/validation/validationroutine.py | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/run.py b/psychopy/demos/coder/iohub/eyetracking/validation/run.py index 8c66f6c116f..9911851b931 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/run.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation/run.py @@ -92,7 +92,7 @@ def runValidation(win): # Specifiy the Triggers to use to move from target point to point during # the validation sequence.... 
- target_triggers = KeyboardTrigger(' ') #TimeTrigger(start_time=None, delay=2.5), + target_triggers = KeyboardTrigger(' ', on_press=True) #TimeTrigger(start_time=None, delay=2.5), # Create a validation procedure validation_proc = ValidationProcedure(win, target, positions, diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py b/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py index 6b383d4c280..a74c047a7a4 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py @@ -159,6 +159,7 @@ def __init__(self, win, target, positions, background=None, storeeventsfor=[], t if io is None: io = ioHubConnection.getActiveConnection() self.io = io + self._keyboard = self.io.devices.keyboard # If storeevents is True, targetdata will be a list of dict's. # Each dict, among other things, contains all ioHub events that occurred @@ -355,7 +356,7 @@ def _addDeviceEvents(self, device_event_dict={}): dev_events.extend(dev.getEvents()) def _checkForTerminate(self): - keys = self.io.devices.keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) + keys = self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) for k in keys: if k.key == self.terminate_key: self._terminate_requested = True @@ -363,11 +364,11 @@ def _checkForTerminate(self): return self._terminate_requested def _checkForToggleGaze(self): - keys = self.io.devices.keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) + keys = self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) for k in keys: if k.key == self.gaze_cursor_key: # get (clear) the event so it does not trigger multiple times. - self.io.devices.keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=True) + self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=True) self.display_gaze = not self.display_gaze self._draw() self.win.flip() @@ -765,10 +766,11 @@ def run(self): b) display of the target position sequence used for validation data collection. c) display of a validation accuracy results plot. """ + keyboard = self.io.devices.keyboard if self.show_intro_screen: # Display Validation Intro Screen self.showIntroScreen() - if self.terminate_key and self.terminate_key in self.io.devices.keyboard.waitForReleases(keys=None): + if self.terminate_key and self.terminate_key in keyboard.waitForReleases(keys=[' ', self.terminate_key]): print("Escape key pressed. Exiting validation") self.validation_results = None return @@ -786,7 +788,7 @@ def run(self): if self.show_results_screen: if self.showResultsScreen() is not None: - if self.terminate_key and self.terminate_key in self.io.devices.keyboard.waitForReleases(keys=None): + if self.terminate_key and self.terminate_key in keyboard.waitForPresses(keys=[' ', self.terminate_key]): print("Escape key pressed. Exiting validation") self.validation_results = None return From 28868815cc2ec887cef9e01a60b3ee637d650a35 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Wed, 31 Mar 2021 14:46:54 -0300 Subject: [PATCH 07/26] FF: simplified validation demo script Also made space key default trigger for validation targets. 
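
With triggers now defaulting to a space key press, a minimal validation setup no
longer needs to construct any trigger objects. A short sketch of the two equivalent
usages (a sketch only, assuming the demo's local trigger and validationroutine
modules and pre-existing win / target_stim / target_positions, as in run.py):

    from trigger import KeyboardTrigger
    from validationroutine import ValidationProcedure

    # Explicit trigger, equivalent to the new default behavior:
    space_trigger = KeyboardTrigger(' ', on_press=True)
    proc = ValidationProcedure(win, target=target_stim, positions=target_positions,
                               triggers=space_trigger)

    # Or omit the triggers argument entirely and rely on the new default:
    proc = ValidationProcedure(win, target=target_stim, positions=target_positions)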
--- .../coder/iohub/eyetracking/validation/run.py | 79 ++++++++----------- .../validation/validationroutine.py | 11 ++- 2 files changed, 39 insertions(+), 51 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/run.py b/psychopy/demos/coder/iohub/eyetracking/validation/run.py index 9911851b931..f41f72f1bf5 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/run.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation/run.py @@ -8,7 +8,6 @@ from psychopy import visual from psychopy.iohub import launchHubServer -from trigger import TimeTrigger, KeyboardTrigger from validationroutine import ValidationProcedure class TargetStim(object): @@ -73,51 +72,6 @@ def contains(self, p): return self.stim[0].contains(p) -def runValidation(win): - """ - Runs the eye tracker validation procedure using PsychoPy Window win. - This function performs a ValidationProcedure using a validation target - stimulus, a validation position list, and the triggers used to determine - target position progression during the validation procedure. - - :param win: PsychoPy window being used for validation. - :return: - """ - # Create a TargetStim instance - target = TargetStim(win, radius=0.025, fillcolor=[.5, .5, .5], edgecolor=[-1, -1, -1], edgewidth=2, - dotcolor=[1, -1, -1], dotradius=0.005, units='norm', colorspace='rgb') - - positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), - (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] - - # Specifiy the Triggers to use to move from target point to point during - # the validation sequence.... - target_triggers = KeyboardTrigger(' ', on_press=True) #TimeTrigger(start_time=None, delay=2.5), - - # Create a validation procedure - validation_proc = ValidationProcedure(win, target, positions, - target_animation_params=dict(velocity=1.0, - expandedscale=3.0, - expansionduration=0.2, - contractionduration=0.4), - background=None, - triggers=target_triggers, - storeeventsfor=None, - accuracy_period_start=0.550, - accuracy_period_stop=.150, - show_intro_screen=True, - intro_text='Validation procedure is now going to be performed.', - show_results_screen=True, - results_in_degrees=False, - randomize_positions=False, - toggle_gaze_cursor_key='g' - ) - - # Run the validation procedure. The run() method does not return until - # the validation is complete. - return validation_proc.run() - - if __name__ == "__main__": # Create a default PsychoPy Window win = visual.Window((1920, 1080), fullscr=True, allowGUI=False, monitor='55w_60dist') @@ -145,7 +99,36 @@ def runValidation(win): # Run eyetracker calibration r = tracker.runSetupProcedure() - # Run eye tracker validation - validation_results = runValidation(win) + # ValidationProcedure setup + + # Create a TargetStim instance + target_stim = TargetStim(win, radius=0.025, fillcolor=[.5, .5, .5], edgecolor=[-1, -1, -1], edgewidth=2, + dotcolor=[1, -1, -1], dotradius=0.005, units='norm', colorspace='rgb') + + # target_positions: Provide your own list of validation positions, + # or use the PositionGrid class to generate a set. 
+ target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), + (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] + + # Create a validation procedure + validation_proc = ValidationProcedure(win, + target=target_stim, + positions=target_positions, + target_animation_params=dict(velocity=1.0, + expandedscale=3.0, + expansionduration=0.2, + contractionduration=0.4), + accuracy_period_start=0.550, + accuracy_period_stop=.150, + show_intro_screen=True, + intro_text='Validation procedure is now going to be performed.', + show_results_screen=True, + results_in_degrees=False, + randomize_positions=False, + toggle_gaze_cursor_key='g', + terminate_key='escape') + + # Run the validation procedure. run() does not return until the validation is complete. + validation_results = validation_proc.run() io.quit() diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py b/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py index a74c047a7a4..14a12b89942 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py @@ -32,7 +32,7 @@ from psychopy.tools.monitorunittools import pix2deg, deg2pix from posgrid import PositionGrid -from trigger import Trigger +from trigger import Trigger, KeyboardTrigger getTime = core.getTime @@ -626,7 +626,7 @@ def getSampleData(s): class ValidationProcedure(object): def __init__(self, win=None, target=None, positions=None, target_animation_params={}, randomize_positions=True, - background=None, triggers=2.0, storeeventsfor=None, accuracy_period_start=0.350, + background=None, triggers=None, storeeventsfor=None, accuracy_period_start=0.350, accuracy_period_stop=.050, show_intro_screen=True, intro_text='Ready to Start Validation Procedure.', show_results_screen=True, results_in_degrees=False, save_figure_path=None, terminate_key="escape", toggle_gaze_cursor_key="g"): @@ -748,9 +748,14 @@ def __init__(self, win=None, target=None, positions=None, target_animation_param self.io.devices.tracker, self.io.devices.experiment ] + + if triggers is None: + # Use space key press as default target trigger + triggers = KeyboardTrigger(' ', on_press=True) + triggers = Trigger.getTriggersFrom(triggers) + # Create the TargetPosSequenceStim instance; used to control the sequential # presentation of the target at each of the grid positions. - triggers = Trigger.getTriggersFrom(triggers) self.targetsequence = TargetPosSequenceStim(win, target=target, positions=self.positions, background=background, triggers=triggers, storeeventsfor=storeeventsfor, terminate_key=terminate_key, gaze_cursor_key=toggle_gaze_cursor_key) From 6e24117806144bf90a48464ab661210f9cebaa92 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Wed, 31 Mar 2021 14:58:38 -0300 Subject: [PATCH 08/26] RF: Move default validation target stim to module file Working on reducing size (number of lines) needed for basic validation demo. 
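
With TargetStim moved into validationroutine.py, a basic demo needs a single import
for both the target stim and the procedure. A minimal sketch of the reduced demo
flow (assuming an ioHub server with a 'tracker' device has already been started and
the position list is shortened here for brevity, as in run.py):

    from validationroutine import TargetStim, ValidationProcedure

    target_stim = TargetStim(win, radius=0.025, fillcolor=[.5, .5, .5], edgecolor=[-1, -1, -1],
                             edgewidth=2, dotcolor=[1, -1, -1], dotradius=0.005,
                             units='norm', colorspace='rgb')
    target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, -0.85)]
    results = ValidationProcedure(win, target=target_stim, positions=target_positions).run()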
--- .../coder/iohub/eyetracking/validation/run.py | 66 +-------------- .../validation/validationroutine.py | 83 ++++++++++++++++--- 2 files changed, 75 insertions(+), 74 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/run.py b/psychopy/demos/coder/iohub/eyetracking/validation/run.py index f41f72f1bf5..17b3da971e3 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/run.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation/run.py @@ -8,75 +8,13 @@ from psychopy import visual from psychopy.iohub import launchHubServer -from validationroutine import ValidationProcedure - -class TargetStim(object): - def __init__(self, win, radius=None, fillcolor=None, edgecolor=None, edgewidth=None, - dotcolor=None, dotradius=None, units=None, colorspace=None, opacity=1.0, contrast=1.0): - """ - TargetStim is a 'doughnut' style target graphic used during the validation procedure. - - :param win: Window being sued for validation. - :param radius: The outer radius of the target. - :param fillcolor: The color used to fill the target body. - :param edgecolor: The color for the edge around the target. - :param edgewidth: The thickness of the target outer edge (always in pixels). - :param dotcolor: The color of the central target dot. - :param dotradius: The radius to use for the target dot. - :param units: The psychopy unit type of any size values. - :param colorspace: The psychopy color space of any colors. - :param opacity: The transparency of the target (0.0 - 1.0). - :param contrast: The contrast of the target stim. - """ - from weakref import proxy - self.win = proxy(win) - self.stim = [] - self.radius = radius - outer = visual.Circle(self.win, radius=radius, fillColor=fillcolor, lineColor=edgecolor, lineWidth=edgewidth, - edges=32, units=units, colorSpace=colorspace, opacity=opacity, - contrast=contrast, interpolate=True, autoLog=False) - self.stim.append(outer) - - if dotcolor and dotcolor != fillcolor: - centerdot = visual.Circle(self.win, radius=dotradius, fillColor=dotcolor, lineColor=dotcolor, - lineWidth=0.0, edges=32, interpolate=True, units=units, - colorSpace=colorspace, opacity=opacity, contrast=contrast, autoLog=False) - self.stim.append(centerdot) - - def setRadius(self, r): - """ - Update the radius of the target stim. - """ - self.stim[0].radius = r - - def setPos(self, pos): - """ - Set the center position of the target stim. - """ - for s in self.stim: - s.setPos(pos) - - def draw(self): - """ - Draw the Target stim. - """ - for s in self.stim: - s.draw() - - def contains(self, p): - """ - Is point p contained within the Target Stim? - :param p: x, y position in stim units - :return: bool - """ - return self.stim[0].contains(p) - +from validationroutine import TargetStim, ValidationProcedure if __name__ == "__main__": # Create a default PsychoPy Window + # monitor *must* be the name of a valid PsychoPy Monitor config file. 
     win = visual.Window((1920, 1080), fullscr=True, allowGUI=False, monitor='55w_60dist')
-
     exp_code = 'validation_demo'
     sess_code = 'S_{0}'.format(int(time.mktime(time.localtime())))
 
diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py b/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py
index 14a12b89942..cd778f99663 100644
--- a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py
+++ b/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py
@@ -5,12 +5,12 @@
 To use the validation process from within a Coder script:
 * Create an instance of TargetStim, specifying the fixation target appearance.
 * Create an instance of PositionGrid, which defines target position information.
-* Create a TargetPosSequenceStim instance, providing the TargetStim and
+* Create a ValidationTargetRenderer instance, providing the TargetStim and
   PositionGrid objects created, as well as the Trigger's which should be used
   to transition from one target position to another during the sequence of
   target graphics presentation and the defined positions.
-* Use TargetPosSequenceStim.display() to run the full presentation procedure.
-* Use TargetPosSequenceStim.targetdata to access information about each target
+* Use ValidationTargetRenderer.display() to run the full presentation procedure.
+* Use ValidationTargetRenderer.targetdata to access information about each target
   position displayed and the events collected during the display duration for
   each position.
 
@@ -62,7 +62,70 @@ def toDeg(win, x, y):
     r = pix2deg(xy, win.monitor, correctFlat=False)
     return r[:, 0], r[:, 1]
 
-class TargetPosSequenceStim(object):
+
+class TargetStim(object):
+    def __init__(self, win, radius=None, fillcolor=None, edgecolor=None, edgewidth=None,
+                 dotcolor=None, dotradius=None, units=None, colorspace=None, opacity=1.0, contrast=1.0):
+        """
+        TargetStim is a 'doughnut' style target graphic used during the validation procedure.
+
+        :param win: Window being used for validation.
+        :param radius: The outer radius of the target.
+        :param fillcolor: The color used to fill the target body.
+        :param edgecolor: The color for the edge around the target.
+        :param edgewidth: The thickness of the target outer edge (always in pixels).
+        :param dotcolor: The color of the central target dot.
+        :param dotradius: The radius to use for the target dot.
+        :param units: The psychopy unit type of any size values.
+        :param colorspace: The psychopy color space of any colors.
+        :param opacity: The transparency of the target (0.0 - 1.0).
+        :param contrast: The contrast of the target stim.
+        """
+        from weakref import proxy
+        self.win = proxy(win)
+        self.stim = []
+        self.radius = radius
+        outer = visual.Circle(self.win, radius=radius, fillColor=fillcolor, lineColor=edgecolor, lineWidth=edgewidth,
+                              edges=32, units=units, colorSpace=colorspace, opacity=opacity,
+                              contrast=contrast, interpolate=True, autoLog=False)
+        self.stim.append(outer)
+
+        if dotcolor and dotcolor != fillcolor:
+            centerdot = visual.Circle(self.win, radius=dotradius, fillColor=dotcolor, lineColor=dotcolor,
+                                      lineWidth=0.0, edges=32, interpolate=True, units=units,
+                                      colorSpace=colorspace, opacity=opacity, contrast=contrast, autoLog=False)
+            self.stim.append(centerdot)
+
+    def setRadius(self, r):
+        """
+        Update the radius of the target stim.
+        """
+        self.stim[0].radius = r
+
+    def setPos(self, pos):
+        """
+        Set the center position of the target stim.
+ """ + for s in self.stim: + s.setPos(pos) + + def draw(self): + """ + Draw the Target stim. + """ + for s in self.stim: + s.draw() + + def contains(self, p): + """ + Is point p contained within the Target Stim? + :param p: x, y position in stim units + :return: bool + """ + return self.stim[0].contains(p) + + +class ValidationTargetRenderer(object): TARGET_STATIONARY = 1 TARGET_MOVING = 2 TARGET_EXPANDING = 4 @@ -111,13 +174,13 @@ class TargetPosSequenceStim(object): def __init__(self, win, target, positions, background=None, storeeventsfor=[], triggers=None, msgcategory='', config=None, io=None, terminate_key='escape', gaze_cursor_key='g'): """ - TargetPosSequenceStim combines an instance of a Target stim and an + ValidationTargetRenderer combines an instance of a Target stim and an instance of a PositionGrid to create everything needed to present the target at each position returned by the PositionGrid instance within the psychopy window used to create the Target stim. The target is presented at each position sequentially. - By providing keyword arguments to the TargetPosSequenceStim.display(...) + By providing keyword arguments to the ValidationTargetRenderer.display(...) method, position animation between target positions, and target stim expansion and / or contraction transitions are possible. @@ -754,11 +817,11 @@ def __init__(self, win=None, target=None, positions=None, target_animation_param triggers = KeyboardTrigger(' ', on_press=True) triggers = Trigger.getTriggersFrom(triggers) - # Create the TargetPosSequenceStim instance; used to control the sequential + # Create the ValidationTargetRenderer instance; used to control the sequential # presentation of the target at each of the grid positions. - self.targetsequence = TargetPosSequenceStim(win, target=target, positions=self.positions, background=background, - triggers=triggers, storeeventsfor=storeeventsfor, - terminate_key=terminate_key, gaze_cursor_key=toggle_gaze_cursor_key) + self.targetsequence = ValidationTargetRenderer(win, target=target, positions=self.positions, background=background, + triggers=triggers, storeeventsfor=storeeventsfor, + terminate_key=terminate_key, gaze_cursor_key=toggle_gaze_cursor_key) # Stim for results screen self.imagestim = None self.textstim = None From 61ea18047694199df60f4e81bc3a7d026ecac532 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Wed, 31 Mar 2021 15:28:26 -0300 Subject: [PATCH 09/26] RF: Created iohub.client.eyetracker.validation module Moved code that should be part of validation module into, well, a module: iohub.client.eyetracker.validation. 
;) Moved validation demo to coder/iohub/eyetracking/validation.py --- .../{validation/run.py => validation.py} | 33 ++++++++++--------- psychopy/iohub/client/eyetracker/__init__.py | 0 .../client/eyetracker/validation/__init__.py | 3 ++ .../client/eyetracker}/validation/posgrid.py | 0 .../eyetracker/validation/procedure.py} | 13 ++++---- .../client/eyetracker}/validation/trigger.py | 0 6 files changed, 26 insertions(+), 23 deletions(-) rename psychopy/demos/coder/iohub/eyetracking/{validation/run.py => validation.py} (76%) create mode 100644 psychopy/iohub/client/eyetracker/__init__.py create mode 100644 psychopy/iohub/client/eyetracker/validation/__init__.py rename psychopy/{demos/coder/iohub/eyetracking => iohub/client/eyetracker}/validation/posgrid.py (100%) rename psychopy/{demos/coder/iohub/eyetracking/validation/validationroutine.py => iohub/client/eyetracker/validation/procedure.py} (99%) rename psychopy/{demos/coder/iohub/eyetracking => iohub/client/eyetracker}/validation/trigger.py (100%) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/run.py b/psychopy/demos/coder/iohub/eyetracking/validation.py similarity index 76% rename from psychopy/demos/coder/iohub/eyetracking/validation/run.py rename to psychopy/demos/coder/iohub/eyetracking/validation.py index 17b3da971e3..afb200f4dda 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/run.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation.py @@ -1,14 +1,13 @@ #!/usr/bin/env python2 # -*- coding: utf-8 -*- """ -Example of performing eye tracker validation using ioHub Common Eye Tracker interface -and the ValidationProcedure utility class. +Example of performing eye tracker validation using the ioHub Common Eye Tracker interface +and the psychopy.iohub.client.eyetracker.validation.ValidationProcedure class. """ import time from psychopy import visual from psychopy.iohub import launchHubServer - -from validationroutine import TargetStim, ValidationProcedure +from psychopy.iohub.client.eyetracker.validation import TargetStim, ValidationProcedure if __name__ == "__main__": # Create a default PsychoPy Window @@ -52,21 +51,23 @@ validation_proc = ValidationProcedure(win, target=target_stim, positions=target_positions, - target_animation_params=dict(velocity=1.0, - expandedscale=3.0, - expansionduration=0.2, - contractionduration=0.4), - accuracy_period_start=0.550, - accuracy_period_stop=.150, + randomize_positions=False, + target_animation=dict(velocity=1.0, + expandedscale=3.0, + expansionduration=0.2, + contractionduration=0.4), + accuracy_period_start=0.550, accuracy_period_stop=.150, show_intro_screen=True, intro_text='Validation procedure is now going to be performed.', - show_results_screen=True, - results_in_degrees=False, - randomize_positions=False, - toggle_gaze_cursor_key='g', - terminate_key='escape') + show_results_screen=True, results_in_degrees=False, + toggle_gaze_cursor_key='g', terminate_key='escape') # Run the validation procedure. run() does not return until the validation is complete. 
- validation_results = validation_proc.run() + validation_results = validation_proc.run() + print("++++ Validation Results ++++") + print("Units:", validation_results['reporting_unit_type']) + print("min_error:", validation_results['min_error']) + print("max_error:", validation_results['max_error']) + print("mean_error:", validation_results['mean_error']) io.quit() diff --git a/psychopy/iohub/client/eyetracker/__init__.py b/psychopy/iohub/client/eyetracker/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/psychopy/iohub/client/eyetracker/validation/__init__.py b/psychopy/iohub/client/eyetracker/validation/__init__.py new file mode 100644 index 00000000000..101bc600ad5 --- /dev/null +++ b/psychopy/iohub/client/eyetracker/validation/__init__.py @@ -0,0 +1,3 @@ +from .posgrid import PositionGrid +from .trigger import Trigger, KeyboardTrigger, DeviceEventTrigger, TimeTrigger +from .procedure import TargetStim, ValidationProcedure \ No newline at end of file diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/posgrid.py b/psychopy/iohub/client/eyetracker/validation/posgrid.py similarity index 100% rename from psychopy/demos/coder/iohub/eyetracking/validation/posgrid.py rename to psychopy/iohub/client/eyetracker/validation/posgrid.py diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py b/psychopy/iohub/client/eyetracker/validation/procedure.py similarity index 99% rename from psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py rename to psychopy/iohub/client/eyetracker/validation/procedure.py index cd778f99663..17b411df9e7 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation/validationroutine.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -31,8 +31,7 @@ from psychopy.tools.monitorunittools import convertToPix from psychopy.tools.monitorunittools import pix2deg, deg2pix -from posgrid import PositionGrid -from trigger import Trigger, KeyboardTrigger +from psychopy.iohub.client.eyetracker.validation import PositionGrid, Trigger, KeyboardTrigger getTime = core.getTime @@ -688,7 +687,7 @@ def getSampleData(s): class ValidationProcedure(object): - def __init__(self, win=None, target=None, positions=None, target_animation_params={}, randomize_positions=True, + def __init__(self, win=None, target=None, positions=None, target_animation={}, randomize_positions=True, background=None, triggers=None, storeeventsfor=None, accuracy_period_start=0.350, accuracy_period_stop=.050, show_intro_screen=True, intro_text='Ready to Start Validation Procedure.', show_results_screen=True, results_in_degrees=False, save_figure_path=None, @@ -766,7 +765,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation_param :param win: :param target: :param positions: - :param target_animation_params: + :param target_animation: :param randomize_positions: :param background: :param triggers: @@ -794,9 +793,9 @@ def __init__(self, win=None, target=None, positions=None, target_animation_param if self.randomize_positions: self.positions.randomize() self.win = proxy(win) - if target_animation_params is None: - target_animation_params = {} - self.animation_params = target_animation_params + if target_animation is None: + target_animation = {} + self.animation_params = target_animation self.accuracy_period_start = accuracy_period_start self.accuracy_period_stop = accuracy_period_stop self.show_intro_screen = show_intro_screen diff --git a/psychopy/demos/coder/iohub/eyetracking/validation/trigger.py 
b/psychopy/iohub/client/eyetracker/validation/trigger.py similarity index 100% rename from psychopy/demos/coder/iohub/eyetracking/validation/trigger.py rename to psychopy/iohub/client/eyetracker/validation/trigger.py From d9a970ebaa263e7c550e56a8f95a68875909e1cb Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Wed, 31 Mar 2021 16:04:00 -0300 Subject: [PATCH 10/26] FF: more validation error checking validation results 'passed' == True if all validation points had >=1 valid eye sample, False otherwise. --- .../coder/iohub/eyetracking/validation.py | 2 + .../client/eyetracker/validation/procedure.py | 38 +++++++++++-------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py index afb200f4dda..b7ccfbd4180 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation.py @@ -66,6 +66,8 @@ validation_results = validation_proc.run() print("++++ Validation Results ++++") + print("Passed:", validation_results['passed']) + print("failed_pos_count:", validation_results['positions_failed_processing']) print("Units:", validation_results['reporting_unit_type']) print("min_error:", validation_results['min_error']) print("max_error:", validation_results['max_error']) diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index 17b411df9e7..9d8d9426a11 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -930,20 +930,28 @@ def _createValidationResults(self): stationary_samples = samplesforpos[samplesforpos['targ_state'] == self.targetsequence.TARGET_STATIONARY] - last_stime = stationary_samples[-1]['eye_time'] - first_stime = stationary_samples[0]['eye_time'] + if len(stationary_samples): + last_stime = stationary_samples[-1]['eye_time'] + first_stime = stationary_samples[0]['eye_time'] - filter_stime = last_stime - self.accuracy_period_start - filter_etime = last_stime - self.accuracy_period_stop + filter_stime = last_stime - self.accuracy_period_start + filter_etime = last_stime - self.accuracy_period_stop - all_samples_for_accuracy_calc = stationary_samples[stationary_samples['eye_time'] >= filter_stime] - all_samples_for_accuracy_calc = all_samples_for_accuracy_calc[all_samples_for_accuracy_calc['eye_time'] < filter_etime] + all_samples_for_accuracy_calc = stationary_samples[stationary_samples['eye_time'] >= filter_stime] + all_samples_for_accuracy_calc = all_samples_for_accuracy_calc[all_samples_for_accuracy_calc['eye_time'] < filter_etime] - good_samples_for_accuracy_calc = all_samples_for_accuracy_calc[all_samples_for_accuracy_calc['eye_status'] <= 1] + good_samples_for_accuracy_calc = all_samples_for_accuracy_calc[all_samples_for_accuracy_calc['eye_status'] <= 1] - all_samples_for_accuracy_count = all_samples_for_accuracy_calc.shape[0] - good_accuracy_sample_count = good_samples_for_accuracy_calc.shape[0] - accuracy_calc_good_sample_perc = good_accuracy_sample_count / float(all_samples_for_accuracy_count) + all_samples_for_accuracy_count = all_samples_for_accuracy_calc.shape[0] + good_accuracy_sample_count = good_samples_for_accuracy_calc.shape[0] + try: + accuracy_calc_good_sample_perc = good_accuracy_sample_count / float(all_samples_for_accuracy_count) + except ZeroDivisionError: + accuracy_calc_good_sample_perc = 0 + else: + all_samples_for_accuracy_calc = [] + 
good_samples_for_accuracy_calc = [] + accuracy_calc_good_sample_perc = 0 # Ordered dictionary of the different levels of samples selected during filtering # for valid samples to use in accuracy calculations. @@ -968,7 +976,7 @@ def _createValidationResults(self): position_results['sample_from_filter_stages'] = sample_msg_data_filtering - if accuracy_calc_good_sample_perc == 0.0: + if int(accuracy_calc_good_sample_perc*100) == 0: position_results['calculation_status'] = 'FAILED' results['positions_failed_processing'] += 1 else: @@ -1031,7 +1039,8 @@ def _createValidationResults(self): unit_type = 'degrees' mean_error = summed_error / point_count err_results = dict(reporting_unit_type=unit_type, min_error=min_error, max_error=max_error, - mean_error=mean_error) + mean_error=mean_error, passed=results['positions_failed_processing'] == 0, + positions_failed_processing=results['positions_failed_processing']) for k, v in err_results.items(): self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') @@ -1064,7 +1073,6 @@ def createPlot(self): pindex = position_results['pos_index'] if position_results['calculation_status'] == 'FAILED': # Draw nothing for failed position - # TODO: Draw something. ;) pass else: samples = position_results['sample_from_filter_stages']['used_samples'] @@ -1078,11 +1086,10 @@ def createPlot(self): gaze_x = samples[:]['eye_x'] gaze_y = samples[:]['eye_y'] - normed_time = (time - time.min()) / (time.max() - time.min()) pl.scatter(target_x[0], target_y[0], s=400, color=[0.75, 0.75, 0.75], alpha=0.5) pl.text(target_x[0], target_y[0], str(pindex), size=11, horizontalalignment='center', verticalalignment='center') - pl.scatter(gaze_x, gaze_y, s=40, c=normed_time, cmap=cm, alpha=0.75) + pl.scatter(gaze_x, gaze_y, s=40, c='g', alpha=0.75) if self.results_in_degrees: l, b = toDeg(self.win, (-pixw / 2,), (-pixh / 2, )) @@ -1099,7 +1106,6 @@ def createPlot(self): results['max_error'], results['mean_error'])) - # pl.colorbar() fig.tight_layout() return fig From b2ad1e938e53b321bbac25f949a8a5bbd091f913 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Thu, 1 Apr 2021 07:05:46 -0300 Subject: [PATCH 11/26] RF: Misc cleanup, better results plot colors --- .../coder/iohub/eyetracking/validation.py | 38 ++++----- .../client/eyetracker/validation/procedure.py | 79 ++++++++----------- 2 files changed, 52 insertions(+), 65 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py index b7ccfbd4180..2714e8ce503 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation.py @@ -14,33 +14,27 @@ # monitor *must* be the name of a valid PsychoPy Monitor config file. win = visual.Window((1920, 1080), fullscr=True, allowGUI=False, monitor='55w_60dist') - exp_code = 'validation_demo' + # Create ioHub Server config .... sess_code = 'S_{0}'.format(int(time.mktime(time.localtime()))) - - # Create ioHub Server config settings.... - iohub_config = dict() - iohub_config['experiment_code'] = exp_code - iohub_config['session_code'] = sess_code + iohub_config = dict(experiment_code='validation_demo', session_code=sess_code) # Add an eye tracker device - et_interface_name = 'eyetracker.hw.mouse.EyeTracker' - iohub_config[et_interface_name] = dict(name='tracker') + iohub_config['eyetracker.hw.mouse.EyeTracker'] = dict(name='tracker') # Start the ioHub process. io = launchHubServer(window=win, **iohub_config) - # Get the keyboard and mouse devices for future access. 
- keyboard = io.devices.keyboard + # Get the eye tracker device. tracker = io.devices.tracker - experiment = io.devices.experiment # Run eyetracker calibration r = tracker.runSetupProcedure() # ValidationProcedure setup - # Create a TargetStim instance + # Create a target stim. iohub.client.eyetracker.validation.TargetStim provides a standard doughnut style + # target. Or use any stim that has `.setPos()`, `.setRadius()`, and `.draw()` methods. target_stim = TargetStim(win, radius=0.025, fillcolor=[.5, .5, .5], edgecolor=[-1, -1, -1], edgewidth=2, - dotcolor=[1, -1, -1], dotradius=0.005, units='norm', colorspace='rgb') + dotcolor=[1, -1, -1], dotradius=0.005, units='norm', colorspace='rgb') # target_positions: Provide your own list of validation positions, # or use the PositionGrid class to generate a set. @@ -64,12 +58,14 @@ # Run the validation procedure. run() does not return until the validation is complete. validation_results = validation_proc.run() - - print("++++ Validation Results ++++") - print("Passed:", validation_results['passed']) - print("failed_pos_count:", validation_results['positions_failed_processing']) - print("Units:", validation_results['reporting_unit_type']) - print("min_error:", validation_results['min_error']) - print("max_error:", validation_results['max_error']) - print("mean_error:", validation_results['mean_error']) + if validation_results: + print("++++ Validation Results ++++") + print("Passed:", validation_results['passed']) + print("failed_pos_count:", validation_results['positions_failed_processing']) + print("Units:", validation_results['reporting_unit_type']) + print("min_error:", validation_results['min_error']) + print("max_error:", validation_results['max_error']) + print("mean_error:", validation_results['mean_error']) + else: + print("Validation Aborted by User.") io.quit() diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index 9d8d9426a11..977ddbd40f8 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -171,7 +171,7 @@ class ValidationTargetRenderer(object): ('pupil_size', np.float64)] def __init__(self, win, target, positions, background=None, storeeventsfor=[], triggers=None, msgcategory='', - config=None, io=None, terminate_key='escape', gaze_cursor_key='g'): + io=None, terminate_key='escape', gaze_cursor_key='g'): """ ValidationTargetRenderer combines an instance of a Target stim and an instance of a PositionGrid to create everything needed to present the @@ -201,7 +201,6 @@ def __init__(self, win, target, positions, background=None, storeeventsfor=[], t :param storeeventsfor: :param triggers: :param msgcategory: - :param config: :param io: """ self.terminate_key = terminate_key @@ -231,14 +230,6 @@ def __init__(self, win, target, positions, background=None, storeeventsfor=[], t self.targetdata = [] self.triggers = triggers - - - def getIO(self): - """ - Get the active ioHubConnection instance. - """ - return self.io - def _draw(self): """ Fill the window with the specified background color and draw the @@ -264,7 +255,7 @@ def _animateTarget(self, topos, frompos, **kwargs): Return the flip time when the target was first drawn at the newpos location. """ - io = self.getIO() + io = self.io if frompos is not None: velocity = kwargs.get('velocity') if velocity: @@ -339,7 +330,7 @@ def moveTo(self, topos, frompos, **kwargs): directly. 
Instead, use the display() method to start the full target position presentation sequence. """ - io = self.getIO() + io = self.io fpx, fpy = -1, -1 if frompos is not None: fpx, fpy = frompos[0], frompos[1] @@ -389,7 +380,7 @@ def _hasTriggerFired(self, **kwargs): event_type_id = trig_evt.type # get time trigger of trigger event event_time = triggered.getTriggeringTime() - self.getIO().sendMessageEvent('NEXT_POS_TRIG %d %.3f' % (event_type_id, event_time), self.msgcategory) + self.io.sendMessageEvent('NEXT_POS_TRIG %d %.3f' % (event_type_id, event_time), self.msgcategory) for trig in self.triggers: trig.resetTrigger() return triggered @@ -505,7 +496,7 @@ def display(self, **kwargs): del self.targetdata[:] prevpos = None - io = self.getIO() + io = self.io io.clearEvents('all') io.sendMessageEvent('BEGIN_SEQUENCE {0}'.format(len(self.positions.positions)), self.msgcategory) turn_rec_off = [] @@ -780,6 +771,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r :param terminate_key: :param toggle_gaze_cursor_key: """ + print("TODO: Add max error threshold to filter 'valid' samples for each target position.") self.terminate_key = terminate_key self.toggle_gaze_cursor_key = toggle_gaze_cursor_key @@ -937,21 +929,21 @@ def _createValidationResults(self): filter_stime = last_stime - self.accuracy_period_start filter_etime = last_stime - self.accuracy_period_stop - all_samples_for_accuracy_calc = stationary_samples[stationary_samples['eye_time'] >= filter_stime] - all_samples_for_accuracy_calc = all_samples_for_accuracy_calc[all_samples_for_accuracy_calc['eye_time'] < filter_etime] + all_samples_in_period = stationary_samples[stationary_samples['eye_time'] >= filter_stime] + all_samples_in_period = all_samples_in_period[all_samples_in_period['eye_time'] < filter_etime] - good_samples_for_accuracy_calc = all_samples_for_accuracy_calc[all_samples_for_accuracy_calc['eye_status'] <= 1] + good_samples_in_period = all_samples_in_period[all_samples_in_period['eye_status'] == 0] - all_samples_for_accuracy_count = all_samples_for_accuracy_calc.shape[0] - good_accuracy_sample_count = good_samples_for_accuracy_calc.shape[0] + all_samples_count = all_samples_in_period.shape[0] + good_sample_count = good_samples_in_period.shape[0] try: - accuracy_calc_good_sample_perc = good_accuracy_sample_count / float(all_samples_for_accuracy_count) + good_sample_ratio = good_sample_count / float(all_samples_count) except ZeroDivisionError: - accuracy_calc_good_sample_perc = 0 + good_sample_ratio = 0 else: - all_samples_for_accuracy_calc = [] - good_samples_for_accuracy_calc = [] - accuracy_calc_good_sample_perc = 0 + all_samples_in_period = [] + good_samples_in_period = [] + good_sample_ratio = 0 # Ordered dictionary of the different levels of samples selected during filtering # for valid samples to use in accuracy calculations. @@ -961,37 +953,37 @@ def _createValidationResults(self): stationary_samples=stationary_samples, # Samples that occurred within the # defined time selection period. 
- time_filtered_samples=all_samples_for_accuracy_calc, + time_filtered_samples=all_samples_in_period, # Samples from the selection period that # do not have missing data - used_samples=good_samples_for_accuracy_calc) + used_samples=good_samples_in_period) position_results = dict(pos_index=pindex, sample_time_range=[first_stime, last_stime], filter_samples_time_range=[filter_stime, filter_etime], - valid_filtered_sample_perc=accuracy_calc_good_sample_perc) + valid_filtered_sample_perc=good_sample_ratio) for k, v in position_results.items(): self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') position_results['sample_from_filter_stages'] = sample_msg_data_filtering - if int(accuracy_calc_good_sample_perc*100) == 0: + if int(good_sample_ratio*100) == 0: position_results['calculation_status'] = 'FAILED' results['positions_failed_processing'] += 1 else: - target_x = good_samples_for_accuracy_calc[:]['targ_pos_x'] - target_y = good_samples_for_accuracy_calc[:]['targ_pos_y'] + target_x = good_samples_in_period[:]['targ_pos_x'] + target_y = good_samples_in_period[:]['targ_pos_y'] if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: - left_x = good_samples_for_accuracy_calc[:]['left_eye_x'] - left_y = good_samples_for_accuracy_calc[:]['left_eye_y'] + left_x = good_samples_in_period[:]['left_eye_x'] + left_y = good_samples_in_period[:]['left_eye_y'] left_error_x = target_x - left_x left_error_y = target_y - left_y left_error_xy = np.hypot(left_error_x, left_error_y) - right_x = good_samples_for_accuracy_calc[:]['right_eye_x'] - right_y = good_samples_for_accuracy_calc[:]['right_eye_y'] + right_x = good_samples_in_period[:]['right_eye_x'] + right_y = good_samples_in_period[:]['right_eye_y'] right_error_x = target_x - right_x right_error_y = target_y - right_y right_error_xy = np.hypot(right_error_x, right_error_y) @@ -1006,8 +998,8 @@ def _createValidationResults(self): summed_error += lr_error_mean point_count += 1.0 else: - eye_x = good_samples_for_accuracy_calc[:]['eye_x'] - eye_y = good_samples_for_accuracy_calc[:]['eye_y'] + eye_x = good_samples_in_period[:]['eye_x'] + eye_y = good_samples_in_period[:]['eye_y'] error_x = target_x - eye_x error_y = target_y - eye_y error_xy = np.hypot(error_x, error_y) @@ -1067,8 +1059,8 @@ def createPlot(self): pl.clf() fig = pl.gcf() fig.set_size_inches((pixw * .9) / self.use_dpi, (pixh * .8) / self.use_dpi) - cm = pl.cm.get_cmap('RdYlBu') - + color_list = pl.cm.tab20b(np.linspace(0, 1, (len(results['position_results'])))) + ci = 0 for position_results in results['position_results']: pindex = position_results['pos_index'] if position_results['calculation_status'] == 'FAILED': @@ -1076,7 +1068,6 @@ def createPlot(self): pass else: samples = position_results['sample_from_filter_stages']['used_samples'] - time = samples[:]['eye_time'] target_x = samples[:]['targ_pos_x'] target_y = samples[:]['targ_pos_y'] if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: @@ -1086,10 +1077,12 @@ def createPlot(self): gaze_x = samples[:]['eye_x'] gaze_y = samples[:]['eye_y'] - pl.scatter(target_x[0], target_y[0], s=400, color=[0.75, 0.75, 0.75], alpha=0.5) - pl.text(target_x[0], target_y[0], str(pindex), size=11, horizontalalignment='center', + pl.scatter(target_x[0], target_y[0], s=400, color=color_list[ci]) + pl.scatter(target_x[0], target_y[0], s=300, color=(0.75, 0.75, 0.75)) + pl.text(target_x[0], target_y[0], str(pindex), size=16, color=color_list[ci], horizontalalignment='center', verticalalignment='center') - 
pl.scatter(gaze_x, gaze_y, s=40, c='g', alpha=0.75)
+                pl.scatter(gaze_x, gaze_y, s=40, color=color_list[ci], alpha=0.75)
+                ci += 1
 
             if self.results_in_degrees:
                 l, b = toDeg(self.win, (-pixw / 2,), (-pixh / 2, ))
@@ -1099,7 +1106,6 @@
                 results['max_error'],
                 results['mean_error']))
 
-        # pl.colorbar()
         fig.tight_layout()
         return fig
 
@@ -1156,5 +1149,3 @@
         elif self.imagestim:
             return True
         return False
-
-from psychopy.iohub.util.visualangle import VisualAngleCalc

From 6b5dcd06f5f0546c7006dbd4da7215d5181d3f28 Mon Sep 17 00:00:00 2001
From: Sol Simpson
Date: Thu, 1 Apr 2021 07:23:21 -0300
Subject: [PATCH 12/26] RF: validation proc code reorg

---
 .../coder/iohub/eyetracking/validation.py     |    2 +-
 .../client/eyetracker/validation/procedure.py | 1813 ++++++++----------
 2 files changed, 905 insertions(+), 910 deletions(-)

diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py
index 2714e8ce503..9ae85a59f76 100644
--- a/psychopy/demos/coder/iohub/eyetracking/validation.py
+++ b/psychopy/demos/coder/iohub/eyetracking/validation.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python
 # -*- coding: utf-8 -*-
 """
 Example of performing eye tracker validation using the ioHub Common Eye Tracker interface
diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py
index 977ddbd40f8..e0d2eb495a3 100644
--- a/psychopy/iohub/client/eyetracker/validation/procedure.py
+++ b/psychopy/iohub/client/eyetracker/validation/procedure.py
@@ -3,16 +3,15 @@
 Eye Tracker Validation procedure using the ioHub common eye tracker interface.
 
 To use the validation process from within a Coder script:
-* Create an instance of TargetStim, specifying the fixation target appearance.
-* Create an instance of PositionGrid, which defines target position information.
-* Create a ValidationTargetRenderer instance, providing the TargetStim and
-  PositionGrid objects created, as well as the Trigger's which should be used
-  to transition from one target position to another during the sequence of
-  target graphics presentation and the defined positions.
-* Use ValidationTargetRenderer.display() to run the full presentation procedure.
-* Use ValidationTargetRenderer.targetdata to access information about each target
-  position displayed and the events collected during the display duration for
-  each position.
+* Create a target stim, using TargetStim, or any stim class that has `.setPos()`, `.setRadius()`, and `.draw()`
+  methods.
+* Create a list of target positions to use during validation. Use the PositionGrid class to help create
+  a target position list.
+* Create a ValidationProcedure class instance, providing the target stim and position list
+  and other arguments to define details of the validation procedure.
+* Use `ValidationProcedure.run()` to perform the validation routine.
+* Use `ValidationProcedure.getValidationResults()` to access information about each target
+  position displayed and the events collected during each target validation period.
 
 See demos/coder/iohub/eyetracking/validation.py for a complete example.
 """
@@ -36,32 +35,6 @@
 getTime = core.getTime
 
-def toPix(win, x, y):
-    """Returns the stim's position in pixels,
-    based on its pos, units, and win.
- """ - try: - xy = np.zeros((len(x), 2)) - except TypeError: - xy = np.zeros((1, 2)) - - xy[:, 0] = x - xy[:, 1] = y - r = convertToPix(np.asarray((0, 0)), xy, win.units, win) - return r[:, 0], r[:, 1] - - -def toDeg(win, x, y): - try: - xy = np.zeros((len(x), 2)) - except TypeError: - xy = np.zeros((1, 2)) - xy[:, 0] = x - xy[:, 1] = y - r = pix2deg(xy, win.monitor, correctFlat=False) - return r[:, 0], r[:, 1] - - class TargetStim(object): def __init__(self, win, radius=None, fillcolor=None, edgecolor=None, edgewidth=None, dotcolor=None, dotradius=None, units=None, colorspace=None, opacity=1.0, contrast=1.0): @@ -124,1028 +97,1050 @@ def contains(self, p): return self.stim[0].contains(p) -class ValidationTargetRenderer(object): - TARGET_STATIONARY = 1 - TARGET_MOVING = 2 - TARGET_EXPANDING = 4 - TARGET_CONTRACTING = 8 - # Experiment Message text field types and tokens - message_types = dict(BEGIN_SEQUENCE=('BEGIN_SEQUENCE', '', int), - DONE_SEQUENCE=('DONE_SEQUENCE', '', int), - NEXT_POS_TRIG=('NEXT_POS_TRIG', '', int, float), - START_DRAW=('START_DRAW', ',', int, float, float, float, float), - SYNCTIME=('SYNCTIME', ',', int, float, float, float, float), - EXPAND_SIZE=('EXPAND_SIZE', '', float, float), - CONTRACT_SIZE=('CONTRACT_SIZE', '', float, float), - POS_UPDATE=('POS_UPDATE', ',', float, float), - TARGET_POS=('TARGET_POS', ',', float, float)) - max_msg_type_length = max([len(s) for s in message_types.keys()]) - binocular_sample_message_element = [('targ_pos_ix', np.int), - ('last_msg_time', np.float64), - ('last_msg_type', np.str, max_msg_type_length), - ('next_msg_time', np.float64), - ('next_msg_type', np.str, max_msg_type_length), - ('targ_pos_x', np.float64), - ('targ_pos_y', np.float64), - ('targ_state', np.int), - ('eye_time', np.float64), - ('eye_status', np.int), - ('left_eye_x', np.float64), - ('left_eye_y', np.float64), - ('left_pupil_size', np.float64), - ('right_eye_x', np.float64), - ('right_eye_y', np.float64), - ('right_pupil_size', np.float64)] - monocular_sample_message_element = [('targ_pos_ix', np.int), - ('last_msg_time', np.float64), - ('last_msg_type', np.str, max_msg_type_length), - ('next_msg_time', np.float64), - ('next_msg_type', np.str, max_msg_type_length), - ('targ_pos_x', np.float64), - ('targ_pos_y', np.float64), - ('targ_state', np.int), - ('eye_time', np.float64), - ('eye_status', np.int), - ('eye_x', np.float64), - ('eye_y', np.float64), - ('pupil_size', np.float64)] - - def __init__(self, win, target, positions, background=None, storeeventsfor=[], triggers=None, msgcategory='', - io=None, terminate_key='escape', gaze_cursor_key='g'): +class ValidationProcedure(object): + def __init__(self, win=None, target=None, positions=None, target_animation={}, randomize_positions=True, + background=None, triggers=None, storeeventsfor=None, accuracy_period_start=0.350, + accuracy_period_stop=.050, show_intro_screen=True, intro_text='Ready to Start Validation Procedure.', + show_results_screen=True, results_in_degrees=False, save_figure_path=None, + terminate_key="escape", toggle_gaze_cursor_key="g"): """ - ValidationTargetRenderer combines an instance of a Target stim and an - instance of a PositionGrid to create everything needed to present the - target at each position returned by the PositionGrid instance within the - psychopy window used to create the Target stim. The target is presented at - each position sequentially. + ValidationProcedure is used to check the accuracy of a calibrated eye tracking system. 
- By providing keyword arguments to the ValidationTargetRenderer.display(...) - method, position animation between target positions, and target stim - expansion and / or contraction transitions are possible. + Once a ValidationProcedure class instance has been created, the run(**kwargs) method + can be called to start the validation process. - psychopy.iohub.Trigger based classes are used to define the criteria used to - start displaying the next target position graphics. By providing a list - of a TimerTrigger and a set of DeviceEventTriggers, complex criteria - for target position pacing can be easily defined for use during the display - period. + The validation process consists of the following stages: - iohub devices can be provided in the storeeventsfor keyword argument. - Events which occur during each target position presentation period are - stored and are available at the end of the display() period, grouped by - position index and device event types. + 1) Display an Introduction / Instruction screen. A key press is used to + start target presentation. + 2) The validation target presentation sequence. Based on the Target and + PositionGrid objects provided when the ValidationProcedure was created, + a series of target positions are displayed. The progression from one + target position to the next is controlled by the triggers specified. + The target can simply jump from one position to the next, or optional + linear motion settings can be used to have the target move across the + screen from one point to the next. The Target graphic itself can also + be configured to expand or contract once it has reached a location + defined in the position grid. + 3) During stage 2), data is collected from the devices being monitored by + iohub. Specifically eye tracker samples and experiment messages are + collected. + 4) The data collected during the validation target sequence is used to + calculate accuracy information for each target position presented. + The raw data as well as the computed accuracy data is available via the + ValidationProcedure class. Calculated measures are provided separately + for each target position and include: + + a) An array of the samples used for the accuracy calculation. The + samples used are selected using the following criteria: + i) Only samples where the target was stationary and + not expanding or contracting are selected. + + ii) Samples are selected that fall between: + + start_time_filter = last_sample_time - accuracy_period_start + + and + + end_time_filter = last_sample_time - accuracy_period_end + + Therefore, the duration of the selected sample period is: + + selection_period_dur = end_time_filter - start_time_filter + + iii) Sample that contain missing / invalid position data + are then removed, providing the final set of samples + used for accuracy calculations. The min, max, and mean + values from each set of selected samples is calculated. + + b) The x and y error of each samples gaze position relative to the + current target position. This data is in the same units as is + used by the Target instance. Computations are done for each eye + being recorded. The values are signed floats. + + c) The xy distance error from the from each eye's gaze position to + the target position. This is also calculated as an average of + both eyes when binocular data is available. 
The data is unsigned, + providing the absolute distance from gaze to target positions + + 5) A 2D plot is created displaying each target position and the position of + each sample used for the accuracy calculation. The minimum, maximum, and + average error is displayed for all target positions. A key press is used + to remove the validation results plot, and control is returned to the + script that started the validation display. Note that the plot is also + saved as a png file in the same directory as the calling stript. + + See the validation.py demo in demos.coder.iohub.eyetracker for example usage. :param win: :param target: :param positions: + :param target_animation: + :param randomize_positions: :param background: - :param storeeventsfor: :param triggers: - :param msgcategory: - :param io: + :param storeeventsfor: + :param accuracy_period_start: + :param accuracy_period_stop: + :param show_intro_screen: + :param intro_text: + :param show_results_screen: + :param results_in_degrees: + :param save_figure_path: + :param terminate_key: + :param toggle_gaze_cursor_key: """ self.terminate_key = terminate_key - self.gaze_cursor_key = gaze_cursor_key - self.display_gaze = False - gc_size = deg2pix(3.0, win.monitor, correctFlat=False) - self.gaze_cursor = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0), size=(gc_size, gc_size), - color='green', units='pix', opacity=0.8) - self._terminate_requested = False - self.win = proxy(win) - self.target = target - self.background = background + self.toggle_gaze_cursor_key = toggle_gaze_cursor_key + + self.io = ioHubConnection.getActiveConnection() + + if isinstance(positions, (list, tuple)): + positions = PositionGrid(posList=positions, firstposindex=0, repeatFirstPos=False) self.positions = positions - self.storeevents = storeeventsfor - self.msgcategory = msgcategory - if io is None: - io = ioHubConnection.getActiveConnection() - self.io = io - self._keyboard = self.io.devices.keyboard + self.randomize_positions = randomize_positions + if self.randomize_positions: + self.positions.randomize() + self.win = proxy(win) + if target_animation is None: + target_animation = {} + self.animation_params = target_animation + self.accuracy_period_start = accuracy_period_start + self.accuracy_period_stop = accuracy_period_stop + self.show_intro_screen = show_intro_screen + self.intro_text = intro_text + self.show_results_screen = show_results_screen + self.results_in_degrees = results_in_degrees + self.save_figure_path = save_figure_path + self.validation_results = None + if storeeventsfor is None: + storeeventsfor = [self.io.devices.keyboard, + self.io.devices.mouse, + self.io.devices.tracker, + self.io.devices.experiment + ] - # If storeevents is True, targetdata will be a list of dict's. - # Each dict, among other things, contains all ioHub events that occurred - # from when a target was first presented at a position, to when the - # the wait period completed for that position. - # - self.targetdata = [] - self.triggers = triggers + if triggers is None: + # Use space key press as default target trigger + triggers = KeyboardTrigger(' ', on_press=True) + triggers = Trigger.getTriggersFrom(triggers) - def _draw(self): - """ - Fill the window with the specified background color and draw the - target stim. 
- """ - if self.background: - self.background.draw() - self.target.draw() - if self.display_gaze: - gpos = self.io.devices.tracker.getLastGazePosition() - valid_gaze_pos = isinstance(gpos, (tuple, list)) - if valid_gaze_pos: - pix_pos = toPix(self.win, *gpos) - pix_pos = pix_pos[0][0], pix_pos[1][0] - self.gaze_cursor.setPos(pix_pos) - self.gaze_cursor.draw() + # Create the ValidationTargetRenderer instance; used to control the sequential + # presentation of the target at each of the grid positions. + self.targetsequence = ValidationTargetRenderer(win, target=target, positions=self.positions, + background=background, + triggers=triggers, storeeventsfor=storeeventsfor, + terminate_key=terminate_key, + gaze_cursor_key=toggle_gaze_cursor_key) + # Stim for results screen + self.imagestim = None + self.textstim = None + self.use_dpi = 90 - def _animateTarget(self, topos, frompos, **kwargs): + def run(self): """ - Any logic related to drawing the target at the new screen position, - including any intermediate animation effects, is done here. - - Return the flip time when the target was first drawn at the newpos - location. + Run the validation procedure, returning after the full validation process is complete, including: + a) display of an instruction screen + b) display of the target position sequence used for validation data collection. + c) display of a validation accuracy results plot. """ - io = self.io - if frompos is not None: - velocity = kwargs.get('velocity') - if velocity: - starttime = getTime() - a, b = np.abs(topos - frompos) ** 2 - duration = np.sqrt(a + b) / velocity - arrivetime = duration + starttime - fliptime = starttime - while fliptime < arrivetime: - mu = (fliptime - starttime) / duration - tpos = frompos * (1.0 - mu) + topos * mu - self.target.setPos(frompos * (1.0 - mu) + topos * mu) - self._draw() - fliptime = self.win.flip() - io.sendMessageEvent('POS_UPDATE %.4f,%.4f' % (tpos[0], tpos[1]), self.msgcategory, - sec_time=fliptime) - self._addDeviceEvents() - if self._terminate_requested: - return 0 - - self.target.setPos(topos) - self._draw() - fliptime = self.win.flip() - io.sendMessageEvent('TARGET_POS %.4f,%.4f' % (topos[0], topos[1]), self.msgcategory, sec_time=fliptime) - self._addDeviceEvents() - - expandedscale = kwargs.get('expandedscale') - expansionduration = kwargs.get('expansionduration') - contractionduration = kwargs.get('contractionduration') + keyboard = self.io.devices.keyboard + if self.show_intro_screen: + # Display Validation Intro Screen + self.showIntroScreen() + if self.terminate_key and self.terminate_key in keyboard.waitForReleases(keys=[' ', self.terminate_key]): + print("Escape key pressed. Exiting validation") + self.validation_results = None + return - initialradius = self.target.radius - if expandedscale: - expandedradius = self.target.radius * expandedscale + # Perform Validation..... + terminate = not self.targetsequence.display(**self.animation_params) + if terminate: + print("Escape key pressed. 
Exiting validation") + self.validation_results = None + return - if expansionduration: - starttime = fliptime - expandedtime = fliptime + expansionduration - while fliptime < expandedtime: - mu = (fliptime - starttime) / expansionduration - cradius = initialradius * (1.0 - mu) + expandedradius * mu - self.target.setRadius(cradius) - self._draw() - fliptime = self.win.flip() - io.sendMessageEvent('EXPAND_SIZE %.4f %.4f' % (cradius, initialradius), self.msgcategory, - sec_time=fliptime) - self._addDeviceEvents() - if self._terminate_requested: - return 0 - if contractionduration: - starttime = fliptime - contractedtime = fliptime + contractionduration - while fliptime < contractedtime: - mu = (fliptime - starttime) / contractionduration - cradius = expandedradius * (1.0 - mu) + initialradius * mu - self.target.setRadius(cradius) - self._draw() - fliptime = self.win.flip() - io.sendMessageEvent('CONTRACT_SIZE %.4f %.4f' % (cradius, initialradius), self.msgcategory, - sec_time=fliptime) - self._addDeviceEvents() - if self._terminate_requested: - return 0 + self.io.clearEvents('all') - self.target.setRadius(initialradius) - return fliptime + self._createValidationResults() - def moveTo(self, topos, frompos, **kwargs): - """ - Indicates that the target should be moved frompos to topos. + if self.show_results_screen: + if self.showResultsScreen() is not None: + if self.terminate_key and self.terminate_key in keyboard.waitForPresses(keys=[' ', self.terminate_key]): + print("Escape key pressed. Exiting validation") + self.validation_results = None + return + return self.validation_results - If a PositionGrid has been provided, moveTo should not be called - directly. Instead, use the display() method to start the full - target position presentation sequence. - """ - io = self.io - fpx, fpy = -1, -1 - if frompos is not None: - fpx, fpy = frompos[0], frompos[1] - io.sendMessageEvent('START_DRAW %d %.4f,%.4f %.4f,%.4f' % (self.positions.posIndex, fpx, fpy, topos[0], - topos[1]), self.msgcategory) - fliptime = self._animateTarget(topos, frompos, **kwargs) - io.sendMessageEvent('SYNCTIME %d %.4f,%.4f %.4f,%.4f' % (self.positions.posIndex, fpx, fpy, topos[0], topos[1]), - self.msgcategory, sec_time=fliptime) + def showResultsScreen(self): + self._buildResultScreen() + if self.imagestim: + self.imagestim.draw() + self.textstim.draw() + return self.win.flip() - # wait for trigger to fire - last_pump_time = fliptime - trig_fired = self._hasTriggerFired(start_time=fliptime) - while not trig_fired: - if getTime() - last_pump_time >= 0.250: - win32MessagePump() - last_pump_time = getTime() + def showIntroScreen(self): + text = self.intro_text + '\nPress SPACE to Start....' 
+ textpos = (0, 0) + if self.textstim: + self.textstim.setText(text) + self.textstim.setPos(textpos) + else: + self.textstim = visual.TextStim(self.win, text=text, pos=textpos, height=30, color=(0, 0, 0), + colorSpace='rgb255', opacity=1.0, contrast=1.0, units='pix', + ori=0.0, antialias=True, bold=False, italic=False, anchorHoriz='center', + anchorVert='center', wrapWidth=self.win.size[0] * .8) - if self.display_gaze: - self._draw() - self.win.flip() - else: - sleep(0.001) + self.textstim.draw() + return self.win.flip() - if self._checkForTerminate(): - return - self._checkForToggleGaze() - trig_fired = self._hasTriggerFired(start_time=fliptime) + def getValidationResults(self): + return self.validation_results - def _hasTriggerFired(self, **kwargs): - """ - Used internally to know when one of the triggers has occurred and - the target should move to the next target position. + def _createValidationResults(self): """ - # wait for trigger to fire - triggered = None - for trig in self.triggers: - if trig.triggered(**kwargs): - triggered = trig - break - self._addDeviceEvents(trig.clearEventHistory(True)) - if triggered: - # by default, assume it was a timer trigger,so use 255 as 'event type' - event_type_id = 255 - trig_evt = triggered.getTriggeringEvent() - if hasattr(trig_evt, 'type'): - # actually it was a device event trigger - event_type_id = trig_evt.type - # get time trigger of trigger event - event_time = triggered.getTriggeringTime() - self.io.sendMessageEvent('NEXT_POS_TRIG %d %.3f' % (event_type_id, event_time), self.msgcategory) - for trig in self.triggers: - trig.resetTrigger() - return triggered + Create validation results dict and save validation analysis info as experiment messages to + the iohub .hdf5 file. - def _initTargetData(self, frompos, topos): - """ - Internally used to create the data structure used to store position - information and events which occurred during each target position - period. + :return: dict """ - if self.storeevents: - deviceevents = {} - for device in self.storeevents: - deviceevents[device] = [] - self.targetdata.append(dict(frompos=frompos, topos=topos, events=deviceevents)) + self.validation_results = None + sample_array = self.targetsequence.getSampleMessageData() - def _addDeviceEvents(self, device_event_dict={}): - if self._checkForTerminate(): - return - self._checkForToggleGaze() - dev_event_buffer = self.targetdata[-1]['events'] - for dev, dev_events in dev_event_buffer.items(): - if dev in device_event_dict: - dev_events.extend(device_event_dict[dev]) - else: - dev_events.extend(dev.getEvents()) + if self.results_in_degrees: + for postdat in sample_array: + postdat['targ_pos_x'], postdat['targ_pos_y'] = toDeg(self.win, + *toPix(self.win, postdat['targ_pos_x'], + postdat['targ_pos_y'])) - def _checkForTerminate(self): - keys = self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) - for k in keys: - if k.key == self.terminate_key: - self._terminate_requested = True - break - return self._terminate_requested + if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + postdat['left_eye_x'], postdat['left_eye_y'] = toDeg(self.win, + *toPix(self.win, postdat['left_eye_x'], + postdat['left_eye_y'])) - def _checkForToggleGaze(self): - keys = self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) - for k in keys: - if k.key == self.gaze_cursor_key: - # get (clear) the event so it does not trigger multiple times. 
- self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=True) - self.display_gaze = not self.display_gaze - self._draw() - self.win.flip() - return self.display_gaze - return self.display_gaze + postdat['right_eye_x'], postdat['right_eye_y'] = toDeg(self.win, + *toPix(self.win, postdat['right_eye_x'], + postdat['right_eye_y'])) + else: + postdat['eye_x'], postdat['eye_y'] = toDeg(self.win, + *toPix(self.win, postdat['eye_x'], postdat['eye_y'])) - def display(self, **kwargs): - """ - Display the target at each point in the position grid, performing - target animation if requested. The target then holds position until one - of the specified triggers occurs, resulting in the target moving to the - next position in the positiongrid. + min_error = 100000.0 + max_error = 0.0 + summed_error = 0.0 + point_count = 0 - To setup target animation between grid positions, the following keyword - arguments are supported. If an option is not specified, the animation - related to it is not preformed. + self.io.sendMessageEvent('Results', 'VALIDATION') + results = dict(display_units=self.win.units, display_bounds=self.positions.bounds, + display_pix=self.win.size, position_count=len(sample_array), + target_positions=self.targetsequence.positions.getPositions()) - velocity: The rate (units / second) at which the target should move - from a current target position to the next target position. - The value should be in the unit type the target stimulus - is using. + for k, v in results.items(): + self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') - expandedscale: When a target stimulus is at the current grid position, - the target graphic can expand to a size equal to the - original target radius * expandedscale. + results['position_results'] = [] + results['positions_failed_processing'] = 0 - expansionduration: If expandedscale has been specified, this option is - used to set how long it should take for the target to - reach the full expanded target size. Time is in sec. + for pindex, samplesforpos in enumerate(sample_array): + self.io.sendMessageEvent('Target Position Results: {0}'.format(pindex), 'VALIDATION') - contractionduration: If a target has been expanded, this option is used - to specify how many seconds it should take for the - target to contract back to the original target - radius. + stationary_samples = samplesforpos[samplesforpos['targ_state'] == self.targetsequence.TARGET_STATIONARY] - Note that target expansion and contraction change the target stimulus - outer diameter only. The edge thickness and central dot radius do not - change. + if len(stationary_samples): + last_stime = stationary_samples[-1]['eye_time'] + first_stime = stationary_samples[0]['eye_time'] - All movement and size changes are linear in fashion. 
+
        For example, to display a static target at each grid position::

            targetsequence.display()

        To have the target stim move between each grid position
        at 400 pixels / sec and not expand or contract::

            targetsequence.display(velocity=400.0)

        If the target should jump from one grid position to the next, and then
        expand to twice the radius over a 0.5 second period::

            targetsequence.display(
                                    expandedscale=2.0,
                                    expansionduration=0.50
                                    )

        To do a similar animation as the previous example, but also have the
        target contract back to its original size over 0.75 seconds::

            targetsequence.display(
                                    expandedscale=2.0,
                                    expansionduration=0.50,
                                    contractionduration=0.75
                                    )

        When this method returns, the target has been displayed at all
        positions. Data collected for each position period can be accessed via
        the targetdata attribute.
- """ - del self.targetdata[:] - prevpos = None + if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + left_x = good_samples_in_period[:]['left_eye_x'] + left_y = good_samples_in_period[:]['left_eye_y'] + left_error_x = target_x - left_x + left_error_y = target_y - left_y + left_error_xy = np.hypot(left_error_x, left_error_y) - io = self.io - io.clearEvents('all') - io.sendMessageEvent('BEGIN_SEQUENCE {0}'.format(len(self.positions.positions)), self.msgcategory) - turn_rec_off = [] - for d in self.storeevents: - if not d.isReportingEvents(): - d.enableEventReporting(True) - turn_rec_off.append(d) + right_x = good_samples_in_period[:]['right_eye_x'] + right_y = good_samples_in_period[:]['right_eye_y'] + right_error_x = target_x - right_x + right_error_y = target_y - right_y + right_error_xy = np.hypot(right_error_x, right_error_y) - sleep(0.025) - for pos in self.positions: - self._initTargetData(prevpos, pos) - self._addDeviceEvents() - if self._terminate_requested: - break - self.moveTo(pos, prevpos, **kwargs) - prevpos = pos - self._addDeviceEvents() - if self._terminate_requested: - break + lr_error = (right_error_xy + left_error_xy) / 2.0 + lr_error_max = lr_error.max() + lr_error_min = lr_error.min() + lr_error_mean = lr_error.mean() + lr_error_std = np.std(lr_error) + min_error = min(min_error, lr_error_min) + max_error = max(max_error, lr_error_max) + summed_error += lr_error_mean + point_count += 1.0 + else: + eye_x = good_samples_in_period[:]['eye_x'] + eye_y = good_samples_in_period[:]['eye_y'] + error_x = target_x - eye_x + error_y = target_y - eye_y + error_xy = np.hypot(error_x, error_y) + lr_error = error_xy + lr_error_max = lr_error.max() + lr_error_min = lr_error.min() + lr_error_mean = lr_error.mean() + lr_error_std = np.std(lr_error) + min_error = min(min_error, lr_error_min) + max_error = max(max_error, lr_error_max) + summed_error += lr_error_mean + point_count += 1.0 - for d in turn_rec_off: - d.enableEventReporting(False) + position_results2 = dict() + position_results2['calculation_status'] = 'PASSED' + position_results2['target_position'] = (target_x[0], target_y[0]) + position_results2['min_error'] = lr_error_min + position_results2['max_error'] = lr_error_max + position_results2['mean_error'] = lr_error_mean + position_results2['stdev_error'] = lr_error_std + for k, v in position_results2.items(): + self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') + position_results[k] = v + results['position_results'].append(position_results) + self.io.sendMessageEvent('Done Target Position Results : {0}'.format(pindex), 'VALIDATION') - if self._terminate_requested: - io.sendMessageEvent('VALIDATION TERMINATED BY USER', self.msgcategory) - return False + unit_type = self.win.units + if self.results_in_degrees: + unit_type = 'degrees' + mean_error = summed_error / point_count + err_results = dict(reporting_unit_type=unit_type, min_error=min_error, max_error=max_error, + mean_error=mean_error, passed=results['positions_failed_processing'] == 0, + positions_failed_processing=results['positions_failed_processing']) - io.sendMessageEvent('DONE_SEQUENCE {0}'.format( len(self.positions.positions)), self.msgcategory) - sleep(0.025) - self._addDeviceEvents() - io.clearEvents('all') - return True + for k, v in err_results.items(): + self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') + results[k] = v - def _processMessageEvents(self): - self.target_pos_msgs = [] - self.saved_pos_samples = [] - for pd in self.targetdata: - frompos = 
pd.get('frompos') - topos = pd.get('topos') - events = pd.get('events') + self.io.sendMessageEvent('Validation Report Complete', 'VALIDATION') - # create a dict of device labels as keys, device events as value - devlabel_events = {} - for k, v in events.items(): - devlabel_events[k.getName()] = v + self.validation_results = results + return self.validation_results - samples = devlabel_events.get('tracker', []) - # remove any eyetracker events that are not samples - samples = [s for s in samples if s.type in (EventConstants.BINOCULAR_EYE_SAMPLE, - EventConstants.MONOCULAR_EYE_SAMPLE)] - self.saved_pos_samples.append(samples) + def createPlot(self): + """ + Creates a matplotlib figure of validation results. + :return: + """ + from matplotlib import pyplot as pl - self.sample_type = self.saved_pos_samples[0][0].type - if self.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: - self.sample_msg_dtype = self.binocular_sample_message_element - else: - self.sample_msg_dtype = self.monocular_sample_message_element + results = self.getValidationResults() + if results is None: + raise RuntimeError("Validation must be run before creating results plot.") - messages = devlabel_events.get('experiment', []) - msg_lists = [] - for m in messages: - temp = m.text.strip().split() - msg_type = self.message_types.get(temp[0]) - if msg_type: - current_msg = [m.time, m.category] - if msg_type[1] == ',': - for t in temp: - current_msg.extend(t.split(',')) - else: - current_msg.extend(temp) + pixw, pixh = results['display_pix'] - for mi, dtype in enumerate(msg_type[2:]): - current_msg[mi + 3] = dtype(current_msg[mi + 3]) + pl.clf() + fig = pl.gcf() + fig.set_size_inches((pixw * .9) / self.use_dpi, (pixh * .8) / self.use_dpi) + color_list = pl.cm.tab20b(np.linspace(0, 1, (len(results['position_results'])))) + ci = 0 + for position_results in results['position_results']: + pindex = position_results['pos_index'] + if position_results['calculation_status'] == 'FAILED': + # Draw nothing for failed position + pass + else: + samples = position_results['sample_from_filter_stages']['used_samples'] + target_x = samples[:]['targ_pos_x'] + target_y = samples[:]['targ_pos_y'] + if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + gaze_x = (samples[:]['left_eye_x'] + samples[:]['right_eye_x']) / 2.0 + gaze_y = (samples[:]['left_eye_y'] + samples[:]['right_eye_y']) / 2.0 + else: + gaze_x = samples[:]['eye_x'] + gaze_y = samples[:]['eye_y'] - msg_lists.append(current_msg) + pl.scatter(target_x[0], target_y[0], s=400, color=color_list[ci]) + pl.scatter(target_x[0], target_y[0], s=300, color=(0.75, 0.75, 0.75)) + pl.text(target_x[0], target_y[0], str(pindex), size=16, color=color_list[ci], + horizontalalignment='center', + verticalalignment='center') + pl.scatter(gaze_x, gaze_y, s=40, color=color_list[ci], alpha=0.75) + ci += 1 - if msg_lists[0][2] == 'NEXT_POS_TRIG': - # handle case where the trigger msg from the previous target - # message was not read until the start of the next pos. 
- # In which case, move msg to end of previous targ pos msgs - npm = msg_lists.pop(0) - self.target_pos_msgs[-1].append(npm) + if self.results_in_degrees: + l, b = toDeg(self.win, (-pixw / 2,), (-pixh / 2,)) + r, t = toDeg(self.win, (pixw / 2,), (pixh / 2,)) + else: + l, t, r, b = results['display_bounds'] - self.target_pos_msgs.append(msg_lists) + pl.xlim(l, r) + pl.ylim(b, t) + pl.xlabel('Horizontal Position (%s)' % (results['reporting_unit_type'])) + pl.ylabel('Vertical Position (%s)' % (results['reporting_unit_type'])) + pl.title('Validation Accuracy (%s)\nMin: %.4f, Max: %.4f, Mean %.4f' % (results['reporting_unit_type'], + results['min_error'], + results['max_error'], + results['mean_error'])) - for i in range(len(self.target_pos_msgs)): - self.target_pos_msgs[i] = np.asarray(self.target_pos_msgs[i], dtype=object) + fig.tight_layout() + return fig - return self.target_pos_msgs + def _generateImageName(self): + import datetime + file_name = 'validation_' + datetime.datetime.now().strftime('%d_%m_%Y_%H_%M') + '.png' + if self.save_figure_path: + return normjoin(self.save_figure_path, file_name) + rootScriptPath = os.path.dirname(sys.argv[0]) + return normjoin(rootScriptPath, file_name) - def getSampleMessageData(self): + def _buildResultScreen(self, replot=False): """ - Return a list of numpy ndarrays, each containing joined eye sample - and previous / next experiment message data for the sample's time. + Build validation results screen. + Currently saves the plot from .createPlot() to disk and the loads that as an image. + :param replot: + :return: """ - # preprocess message events - self._processMessageEvents() - - # inline func to return sample field array based on sample namedtup - def getSampleData(s): - sampledata = [s.time, s.status] - if self.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: - sampledata.extend((s.left_gaze_x, s.left_gaze_y, s.left_pupil_measure1, - s.right_gaze_x, s.right_gaze_y, s.right_pupil_measure1)) - return sampledata - - sampledata.extend((s.gaze_x, s.gaze_y, s.pupil_measure1)) - return sampledata - - current_target_pos = -1.0, -1.0 - current_targ_state = 0 - target_pos_samples = [] - for pindex, samples in enumerate(self.saved_pos_samples): - last_msg, messages = self.target_pos_msgs[pindex][0], self.target_pos_msgs[pindex][1:] - samplesforposition = [] - pos_sample_count = len(samples) - si = 0 - for current_msg in messages: - last_msg_time = last_msg[0] - last_msg_type = last_msg[2] - if last_msg_type == 'START_DRAW': - if not current_targ_state & self.TARGET_STATIONARY: - current_targ_state += self.TARGET_STATIONARY - current_targ_state -= current_targ_state & self.TARGET_MOVING - current_targ_state -= current_targ_state & self.TARGET_EXPANDING - current_targ_state -= current_targ_state & self.TARGET_CONTRACTING - elif last_msg_type == 'EXPAND_SIZE': - if not current_targ_state & self.TARGET_EXPANDING: - current_targ_state += self.TARGET_EXPANDING - current_targ_state -= current_targ_state & self.TARGET_CONTRACTING - elif last_msg_type == 'CONTRACT_SIZE': - if not current_targ_state & self.TARGET_CONTRACTING: - current_targ_state += self.TARGET_CONTRACTING - current_targ_state -= current_targ_state & self.TARGET_EXPANDING - elif last_msg_type == 'TARGET_POS': - current_target_pos = float(last_msg[3]), float(last_msg[4]) - current_targ_state -= current_targ_state & self.TARGET_MOVING - if not current_targ_state & self.TARGET_STATIONARY: - current_targ_state += self.TARGET_STATIONARY - elif last_msg_type == 'POS_UPDATE': - current_target_pos = 
float(last_msg[3]), float(last_msg[4]) - if not current_targ_state & self.TARGET_MOVING: - current_targ_state += self.TARGET_MOVING - current_targ_state -= current_targ_state & self.TARGET_STATIONARY - elif last_msg_type == 'SYNCTIME': - if not current_targ_state & self.TARGET_STATIONARY: - current_targ_state += self.TARGET_STATIONARY - current_targ_state -= current_targ_state & self.TARGET_MOVING - current_targ_state -= current_targ_state & self.TARGET_EXPANDING - current_targ_state -= current_targ_state & self.TARGET_CONTRACTING - current_target_pos = float(last_msg[6]), float(last_msg[7]) - - while si < pos_sample_count: - sample = samples[si] - if sample.time >= last_msg_time and sample.time < current_msg[0]: - sarray = [pindex, last_msg_time, last_msg_type, - current_msg[0], current_msg[2], - current_target_pos[0], current_target_pos[1], - current_targ_state] - sarray.extend(getSampleData(sample)) - sndarray = np.asarray(tuple(sarray), dtype=self.sample_msg_dtype) - samplesforposition.append(sndarray) - si += 1 - elif sample.time >= current_msg[0]: - break - else: - si += 1 - last_msg = current_msg - - possamples = np.asanyarray(samplesforposition) - target_pos_samples.append(possamples) - - # So we now have a list len == number target positions. Each element - # of the list is a list of all eye sample / message data for a - # target position. Each element of the data list for a single target - # position is itself a list that that contains combined info about - # an eye sample and message info valid for when the sample time was. - return np.asanyarray(target_pos_samples, dtype=object) - - -class ValidationProcedure(object): - def __init__(self, win=None, target=None, positions=None, target_animation={}, randomize_positions=True, - background=None, triggers=None, storeeventsfor=None, accuracy_period_start=0.350, - accuracy_period_stop=.050, show_intro_screen=True, intro_text='Ready to Start Validation Procedure.', - show_results_screen=True, results_in_degrees=False, save_figure_path=None, - terminate_key="escape", toggle_gaze_cursor_key="g"): - """ - ValidationProcedure can be used to check the accuracy of a calibrated - eye tracking system. - - Once a ValidationProcedure class instance has been created, the display(**kwargs) method - can be called to run the validation process. - - The validation process consists of the following stages: - - 1) Display an Introduction / Instruction screen. A key press is used to - start target presentation. - 2) The validation target presentation sequence. Based on the Target and - PositionGrid objects provided when the ValidationProcedure was created, - a series of target positions are displayed. The progression from one - target position to the next is controlled by the triggers specified. - The target can simply jump from one position to the next, or optional - linear motion settings can be used to have the target move across the - screen from one point to the next. The Target graphic itself can also - be configured to expand or contract once it has reached a location - defined in the position grid. - 3) During stage 2), data is collected from the devices being monitored by - iohub. Specifically eye tracker samples and experiment messages are - collected. - 4) The data collected during the validation target sequence is used to - calculate accuracy information for each target position presented. - The raw data as well as the computed accuracy data is available via the - ValidationProcedure class. 
Calculated measures are provided seperately
-        for each target position and include:
-
-        a) An array of the samples used for the accuracy calculation. The
-           samples used are selected using the following criteria:
-                i) Only samples where the target was stationary and
-                   not expanding or contracting are selected.
+        if replot or self.imagestim is None:
+            iname = self._generateImageName()
+            self.createPlot().savefig(iname, dpi=self.use_dpi)

-                ii) Samples are selected that fall between:
+            text_pos = (0, 0)
+            text = 'Accuracy Calculation not Possible due to Analysis Error. Press SPACE to continue.'

-                          start_time_filter = last_sample_time - accuracy_period_start
+            if iname:
+                fig_image = Image.open(iname)

-                    and
+                if self.imagestim:
+                    self.imagestim.setImage(fig_image)
+                else:
+                    self.imagestim = visual.ImageStim(self.win, image=fig_image, units='pix', pos=(0.0, 0.0))

-                          end_time_filter = last_sample_time - accuracy_period_end
+                text = 'Press SPACE to continue.'
+                text_pos = (0.0, -(self.win.size[1] / 2.0) * .9)
+            else:
+                self.imagestim = None

-                    Therefore, the duration of the selected sample period is:
+        if self.textstim is None:
+            self.textstim = visual.TextStim(self.win, text=text, pos=text_pos, color=(0, 0, 0), colorSpace='rgb255',
+                                            opacity=1.0, contrast=1.0, units='pix', ori=0.0, height=None,
+                                            antialias=True, bold=False, italic=False, anchorVert='center',
+                                            anchorHoriz='center', wrapWidth=self.win.size[0] * .8)

-                          selection_period_dur = end_time_filter - start_time_filter
+        else:
+            self.textstim.setText(text)
+            self.textstim.setPos(text_pos)

-                iii) Sample that contain missing / invalid position data
-                     are then removed, providing the final set of samples
-                     used for accuracy calculations. The min, max, and mean
-                     values from each set of selected samples is calculated.
+        elif self.imagestim:
+            return True
+        return False

-        b) The x and y error of each samples gaze position relative to the
-           current target position. This data is in the same units as is
-           used by the Target instance. Computations are done for each eye
-           being recorded. The values are signed floats.
+class ValidationTargetRenderer(object): + TARGET_STATIONARY = 1 + TARGET_MOVING = 2 + TARGET_EXPANDING = 4 + TARGET_CONTRACTING = 8 + # Experiment Message text field types and tokens + message_types = dict(BEGIN_SEQUENCE=('BEGIN_SEQUENCE', '', int), + DONE_SEQUENCE=('DONE_SEQUENCE', '', int), + NEXT_POS_TRIG=('NEXT_POS_TRIG', '', int, float), + START_DRAW=('START_DRAW', ',', int, float, float, float, float), + SYNCTIME=('SYNCTIME', ',', int, float, float, float, float), + EXPAND_SIZE=('EXPAND_SIZE', '', float, float), + CONTRACT_SIZE=('CONTRACT_SIZE', '', float, float), + POS_UPDATE=('POS_UPDATE', ',', float, float), + TARGET_POS=('TARGET_POS', ',', float, float)) + max_msg_type_length = max([len(s) for s in message_types.keys()]) + binocular_sample_message_element = [('targ_pos_ix', np.int), + ('last_msg_time', np.float64), + ('last_msg_type', np.str, max_msg_type_length), + ('next_msg_time', np.float64), + ('next_msg_type', np.str, max_msg_type_length), + ('targ_pos_x', np.float64), + ('targ_pos_y', np.float64), + ('targ_state', np.int), + ('eye_time', np.float64), + ('eye_status', np.int), + ('left_eye_x', np.float64), + ('left_eye_y', np.float64), + ('left_pupil_size', np.float64), + ('right_eye_x', np.float64), + ('right_eye_y', np.float64), + ('right_pupil_size', np.float64)] + monocular_sample_message_element = [('targ_pos_ix', np.int), + ('last_msg_time', np.float64), + ('last_msg_type', np.str, max_msg_type_length), + ('next_msg_time', np.float64), + ('next_msg_type', np.str, max_msg_type_length), + ('targ_pos_x', np.float64), + ('targ_pos_y', np.float64), + ('targ_state', np.int), + ('eye_time', np.float64), + ('eye_status', np.int), + ('eye_x', np.float64), + ('eye_y', np.float64), + ('pupil_size', np.float64)] - c) The xy distance error from the from each eye's gaze position to - the target position. This is also calculated as an average of - both eyes when binocular data is available. The data is unsigned, - providing the absolute distance from gaze to target positions + def __init__(self, win, target, positions, background=None, storeeventsfor=[], triggers=None, msgcategory='', + io=None, terminate_key='escape', gaze_cursor_key='g'): + """ + ValidationTargetRenderer is an internal class used by `ValidationProcedure`. - 5) A 2D plot is created displaying each target position and the position of - each sample used for the accuracy calculation. The minimum, maximum, and - average error is displayed for all target positions. A key press is used - to remove the validation results plot, and control is returned to the - script that started the validation display. Note that the plot is also - saved as a png file in the same directory as the calling stript. + psychopy.iohub.Trigger based classes are used to define the criteria used to + start displaying the next target position graphics. By providing a set of + DeviceEventTriggers, complex criteria for target position pacing can be defined. - See the validation.py demo in demos.coder.iohub.eyetracker for example usage. + iohub devices can be provided in the storeeventsfor keyword argument. + Events which occur during each target position presentation period are + stored and are available at the end of the display() period, grouped by + position index and device event types. 
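[Editorial note, not part of the patch: the two dtype lists above define the numpy structured rows that getSampleMessageData() later fills, one row per eye sample joined with the surrounding message state. A minimal monocular sketch of that packing (trimmed field set, invented values; note that modern numpy spells the deprecated np.int / np.str aliases used above as plain int and str)::

    import numpy as np

    # Trimmed monocular layout, mirroring the structure defined above.
    sample_msg_dtype = [('targ_pos_x', np.float64), ('targ_pos_y', np.float64),
                        ('targ_state', np.int32),
                        ('eye_time', np.float64), ('eye_status', np.int32),
                        ('eye_x', np.float64), ('eye_y', np.float64)]

    # One joined row: target state paired with a single eye sample.
    row = (0.85, -0.85, 1, 12.3456, 0, 0.82, -0.80)
    arr = np.asarray(row, dtype=sample_msg_dtype)

    # Named-field access is what the later accuracy code relies on.
    print(arr['targ_pos_x'] - arr['eye_x'])  # x error for this sample
]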
:param win: :param target: :param positions: - :param target_animation: - :param randomize_positions: :param background: - :param triggers: :param storeeventsfor: - :param accuracy_period_start: - :param accuracy_period_stop: - :param show_intro_screen: - :param intro_text: - :param show_results_screen: - :param results_in_degrees: - :param save_figure_path: - :param terminate_key: - :param toggle_gaze_cursor_key: + :param triggers: + :param msgcategory: + :param io: """ - print("TODO: Add max error threshold to filter 'valid' samples for each target position.") self.terminate_key = terminate_key - self.toggle_gaze_cursor_key = toggle_gaze_cursor_key - - self.io = ioHubConnection.getActiveConnection() - - if isinstance(positions, (list, tuple)): - positions = PositionGrid(posList=positions, firstposindex=0, repeatFirstPos=False) + self.gaze_cursor_key = gaze_cursor_key + self.display_gaze = False + gc_size = deg2pix(3.0, win.monitor, correctFlat=False) + self.gaze_cursor = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0), size=(gc_size, gc_size), + color='green', units='pix', opacity=0.8) + self._terminate_requested = False + self.win = proxy(win) + self.target = target + self.background = background self.positions = positions + self.storeevents = storeeventsfor + self.msgcategory = msgcategory - self.randomize_positions = randomize_positions - if self.randomize_positions: - self.positions.randomize() - self.win = proxy(win) - if target_animation is None: - target_animation = {} - self.animation_params = target_animation - self.accuracy_period_start = accuracy_period_start - self.accuracy_period_stop = accuracy_period_stop - self.show_intro_screen = show_intro_screen - self.intro_text = intro_text - self.show_results_screen = show_results_screen - self.results_in_degrees = results_in_degrees - self.save_figure_path = save_figure_path - self.validation_results = None - if storeeventsfor is None: - storeeventsfor = [self.io.devices.keyboard, - self.io.devices.mouse, - self.io.devices.tracker, - self.io.devices.experiment - ] + if io is None: + io = ioHubConnection.getActiveConnection() + self.io = io + self._keyboard = self.io.devices.keyboard - if triggers is None: - # Use space key press as default target trigger - triggers = KeyboardTrigger(' ', on_press=True) - triggers = Trigger.getTriggersFrom(triggers) + # If storeevents is True, targetdata will be a list of dict's. + # Each dict, among other things, contains all ioHub events that occurred + # from when a target was first presented at a position, to when the + # the wait period completed for that position. + # + self.targetdata = [] + self.triggers = triggers - # Create the ValidationTargetRenderer instance; used to control the sequential - # presentation of the target at each of the grid positions. - self.targetsequence = ValidationTargetRenderer(win, target=target, positions=self.positions, background=background, - triggers=triggers, storeeventsfor=storeeventsfor, - terminate_key=terminate_key, gaze_cursor_key=toggle_gaze_cursor_key) - # Stim for results screen - self.imagestim = None - self.textstim = None - self.use_dpi = 90 + def _draw(self): + """ + Fill the window with the specified background color and draw the + target stim. 
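[Editorial note, not part of the patch: _draw() only trusts the tracker position when it looks like a coordinate pair; iohub trackers may return a missing-data constant instead, so the isinstance check doubles as a validity test. A tiny hedged sketch of that guard (names are illustrative)::

    def usable_gaze_pos(gpos):
        """True when gpos looks like an (x, y) pair rather than a
        missing-data value such as None or a status constant."""
        return isinstance(gpos, (tuple, list))

    print(usable_gaze_pos((0.1, -0.2)))  # True -> position the cursor and draw
    print(usable_gaze_pos(None))         # False -> skip the cursor this frame
]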
+ """ + if self.background: + self.background.draw() + self.target.draw() + if self.display_gaze: + gpos = self.io.devices.tracker.getLastGazePosition() + valid_gaze_pos = isinstance(gpos, (tuple, list)) + if valid_gaze_pos: + pix_pos = toPix(self.win, *gpos) + pix_pos = pix_pos[0][0], pix_pos[1][0] + self.gaze_cursor.setPos(pix_pos) + self.gaze_cursor.draw() - def run(self): + def _animateTarget(self, topos, frompos, **kwargs): """ - Run the validation procedure, returning after the full validation process is complete, including: - a) display of an instruction screen - b) display of the target position sequence used for validation data collection. - c) display of a validation accuracy results plot. + Any logic related to drawing the target at the new screen position, + including any intermediate animation effects, is done here. + + Return the flip time when the target was first drawn at the newpos + location. """ - keyboard = self.io.devices.keyboard - if self.show_intro_screen: - # Display Validation Intro Screen - self.showIntroScreen() - if self.terminate_key and self.terminate_key in keyboard.waitForReleases(keys=[' ', self.terminate_key]): - print("Escape key pressed. Exiting validation") - self.validation_results = None - return + io = self.io + if frompos is not None: + velocity = kwargs.get('velocity') + if velocity: + starttime = getTime() + a, b = np.abs(topos - frompos) ** 2 + duration = np.sqrt(a + b) / velocity + arrivetime = duration + starttime + fliptime = starttime + while fliptime < arrivetime: + mu = (fliptime - starttime) / duration + tpos = frompos * (1.0 - mu) + topos * mu + self.target.setPos(frompos * (1.0 - mu) + topos * mu) + self._draw() + fliptime = self.win.flip() + io.sendMessageEvent('POS_UPDATE %.4f,%.4f' % (tpos[0], tpos[1]), self.msgcategory, + sec_time=fliptime) + self._addDeviceEvents() + if self._terminate_requested: + return 0 - # Perform Validation..... - terminate = not self.targetsequence.display(**self.animation_params) - if terminate: - print("Escape key pressed. Exiting validation") - self.validation_results = None - return + self.target.setPos(topos) + self._draw() + fliptime = self.win.flip() + io.sendMessageEvent('TARGET_POS %.4f,%.4f' % (topos[0], topos[1]), self.msgcategory, sec_time=fliptime) + self._addDeviceEvents() - self.io.clearEvents('all') + expandedscale = kwargs.get('expandedscale') + expansionduration = kwargs.get('expansionduration') + contractionduration = kwargs.get('contractionduration') - self._createValidationResults() + initialradius = self.target.radius + if expandedscale: + expandedradius = self.target.radius * expandedscale - if self.show_results_screen: - if self.showResultsScreen() is not None: - if self.terminate_key and self.terminate_key in keyboard.waitForPresses(keys=[' ', self.terminate_key]): - print("Escape key pressed. 
Exiting validation") - self.validation_results = None - return - return self.validation_results + if expansionduration: + starttime = fliptime + expandedtime = fliptime + expansionduration + while fliptime < expandedtime: + mu = (fliptime - starttime) / expansionduration + cradius = initialradius * (1.0 - mu) + expandedradius * mu + self.target.setRadius(cradius) + self._draw() + fliptime = self.win.flip() + io.sendMessageEvent('EXPAND_SIZE %.4f %.4f' % (cradius, initialradius), self.msgcategory, + sec_time=fliptime) + self._addDeviceEvents() + if self._terminate_requested: + return 0 + if contractionduration: + starttime = fliptime + contractedtime = fliptime + contractionduration + while fliptime < contractedtime: + mu = (fliptime - starttime) / contractionduration + cradius = expandedradius * (1.0 - mu) + initialradius * mu + self.target.setRadius(cradius) + self._draw() + fliptime = self.win.flip() + io.sendMessageEvent('CONTRACT_SIZE %.4f %.4f' % (cradius, initialradius), self.msgcategory, + sec_time=fliptime) + self._addDeviceEvents() + if self._terminate_requested: + return 0 - def showResultsScreen(self): - self._buildResultScreen() - if self.imagestim: - self.imagestim.draw() - self.textstim.draw() - return self.win.flip() + self.target.setRadius(initialradius) + return fliptime - def showIntroScreen(self): - text = self.intro_text + '\nPress SPACE to Start....' - textpos = (0, 0) - if self.textstim: - self.textstim.setText(text) - self.textstim.setPos(textpos) - else: - self.textstim = visual.TextStim(self.win, text=text, pos=textpos, height=30, color=(0, 0, 0), - colorSpace='rgb255', opacity=1.0, contrast=1.0, units='pix', - ori=0.0, antialias=True, bold=False, italic=False, anchorHoriz='center', - anchorVert='center', wrapWidth=self.win.size[0] * .8) + def moveTo(self, topos, frompos, **kwargs): + """ + Indicates that the target should be moved frompos to topos. - self.textstim.draw() - return self.win.flip() + If a PositionGrid has been provided, moveTo should not be called + directly. Instead, use the display() method to start the full + target position presentation sequence. + """ + io = self.io + fpx, fpy = -1, -1 + if frompos is not None: + fpx, fpy = frompos[0], frompos[1] + io.sendMessageEvent('START_DRAW %d %.4f,%.4f %.4f,%.4f' % (self.positions.posIndex, fpx, fpy, topos[0], + topos[1]), self.msgcategory) + fliptime = self._animateTarget(topos, frompos, **kwargs) + io.sendMessageEvent('SYNCTIME %d %.4f,%.4f %.4f,%.4f' % (self.positions.posIndex, fpx, fpy, topos[0], topos[1]), + self.msgcategory, sec_time=fliptime) - def getValidationResults(self): - return self.validation_results + # wait for trigger to fire + last_pump_time = fliptime + trig_fired = self._hasTriggerFired(start_time=fliptime) + while not trig_fired: + if getTime() - last_pump_time >= 0.250: + win32MessagePump() + last_pump_time = getTime() + + if self.display_gaze: + self._draw() + self.win.flip() + else: + sleep(0.001) + + if self._checkForTerminate(): + return + self._checkForToggleGaze() + trig_fired = self._hasTriggerFired(start_time=fliptime) - def _createValidationResults(self): + def _hasTriggerFired(self, **kwargs): """ - Create validation results dict and save validation analysis info as experiment messages to - the iohub .hdf5 file. - - :return: dict + Used internally to know when one of the triggers has occurred and + the target should move to the next target position. 
""" - self.validation_results = None - sample_array = self.targetsequence.getSampleMessageData() - - if self.results_in_degrees: - for postdat in sample_array: - postdat['targ_pos_x'], postdat['targ_pos_y'] = toDeg(self.win, *toPix(self.win, postdat['targ_pos_x'], postdat['targ_pos_y'])) + # wait for trigger to fire + triggered = None + for trig in self.triggers: + if trig.triggered(**kwargs): + triggered = trig + break + self._addDeviceEvents(trig.clearEventHistory(True)) + if triggered: + # by default, assume it was a timer trigger,so use 255 as 'event type' + event_type_id = 255 + trig_evt = triggered.getTriggeringEvent() + if hasattr(trig_evt, 'type'): + # actually it was a device event trigger + event_type_id = trig_evt.type + # get time trigger of trigger event + event_time = triggered.getTriggeringTime() + self.io.sendMessageEvent('NEXT_POS_TRIG %d %.3f' % (event_type_id, event_time), self.msgcategory) + for trig in self.triggers: + trig.resetTrigger() + return triggered - if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: - postdat['left_eye_x'], postdat['left_eye_y'] = toDeg(self.win, *toPix(self.win, postdat['left_eye_x'], - postdat['left_eye_y'])) + def _initTargetData(self, frompos, topos): + """ + Internally used to create the data structure used to store position + information and events which occurred during each target position + period. + """ + if self.storeevents: + deviceevents = {} + for device in self.storeevents: + deviceevents[device] = [] + self.targetdata.append(dict(frompos=frompos, topos=topos, events=deviceevents)) - postdat['right_eye_x'], postdat['right_eye_y'] = toDeg(self.win, *toPix(self.win, postdat['right_eye_x'], - postdat['right_eye_y'])) - else: - postdat['eye_x'], postdat['eye_y'] = toDeg(self.win, *toPix(self.win, postdat['eye_x'], postdat['eye_y'])) + def _addDeviceEvents(self, device_event_dict={}): + if self._checkForTerminate(): + return + self._checkForToggleGaze() + dev_event_buffer = self.targetdata[-1]['events'] + for dev, dev_events in dev_event_buffer.items(): + if dev in device_event_dict: + dev_events.extend(device_event_dict[dev]) + else: + dev_events.extend(dev.getEvents()) - min_error = 100000.0 - max_error = 0.0 - summed_error = 0.0 - point_count = 0 + def _checkForTerminate(self): + keys = self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) + for k in keys: + if k.key == self.terminate_key: + self._terminate_requested = True + break + return self._terminate_requested - self.io.sendMessageEvent('Results', 'VALIDATION') - results = dict(display_units=self.win.units, display_bounds=self.positions.bounds, - display_pix=self.win.size, position_count=len(sample_array), - target_positions=self.targetsequence.positions.getPositions()) + def _checkForToggleGaze(self): + keys = self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) + for k in keys: + if k.key == self.gaze_cursor_key: + # get (clear) the event so it does not trigger multiple times. + self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=True) + self.display_gaze = not self.display_gaze + self._draw() + self.win.flip() + return self.display_gaze + return self.display_gaze - for k, v in results.items(): - self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') + def display(self, **kwargs): + """ + Display the target at each point in the position grid, performing + target animation if requested. 
The target then holds position until one
+        of the specified triggers occurs, resulting in the target moving to the
+        next position in the positiongrid.

-        results['position_results'] = []
-        results['positions_failed_processing'] = 0
+        To set up target animation between grid positions, the following keyword
+        arguments are supported. If an option is not specified, the animation
+        related to it is not performed.

-        for pindex, samplesforpos in enumerate(sample_array):
-            self.io.sendMessageEvent('Target Position Results: {0}'.format(pindex), 'VALIDATION')
+        velocity: The rate (units / second) at which the target should move
+                  from a current target position to the next target position.
+                  The value should be in the unit type the target stimulus
+                  is using.

-            stationary_samples = samplesforpos[samplesforpos['targ_state'] == self.targetsequence.TARGET_STATIONARY]
+        expandedscale: When a target stimulus is at the current grid position,
+                       the target graphic can expand to a size equal to the
+                       original target radius * expandedscale.

-            if len(stationary_samples):
-                last_stime = stationary_samples[-1]['eye_time']
-                first_stime = stationary_samples[0]['eye_time']
+        expansionduration: If expandedscale has been specified, this option is
+                           used to set how long it should take for the target to
+                           reach the full expanded target size. Time is in sec.

-                filter_stime = last_stime - self.accuracy_period_start
-                filter_etime = last_stime - self.accuracy_period_stop
+        contractionduration: If a target has been expanded, this option is used
+                             to specify how many seconds it should take for the
+                             target to contract back to the original target
+                             radius.

-                all_samples_in_period = stationary_samples[stationary_samples['eye_time'] >= filter_stime]
-                all_samples_in_period = all_samples_in_period[all_samples_in_period['eye_time'] < filter_etime]
+        Note that target expansion and contraction change the target stimulus
+        outer diameter only. The edge thickness and central dot radius do not
+        change.

-                good_samples_in_period = all_samples_in_period[all_samples_in_period['eye_status'] == 0]
+        All movement and size changes are linear in fashion.

-                all_samples_count = all_samples_in_period.shape[0]
-                good_sample_count = good_samples_in_period.shape[0]
-                try:
-                    good_sample_ratio = good_sample_count / float(all_samples_count)
-                except ZeroDivisionError:
-                    good_sample_ratio = 0
-            else:
-                all_samples_in_period = []
-                good_samples_in_period = []
-                good_sample_ratio = 0
+        For example, to display a static target at each grid position::

-            # Ordered dictionary of the different levels of samples selected during filtering
-            # for valid samples to use in accuracy calculations.
-            sample_msg_data_filtering = OrderedDict(all_samples=samplesforpos,  # All samples from target period.
-                                                    # Sample during stationary period at end of target
-                                                    # presentation display.
-                                                    stationary_samples=stationary_samples,
-                                                    # Samples that occurred within the
-                                                    # defined time selection period.
-                                                    time_filtered_samples=all_samples_in_period,
-                                                    # Samples from the selection period that
-                                                    # do not have missing data
-                                                    used_samples=good_samples_in_period)
+            targetsequence.display()

-            position_results = dict(pos_index=pindex,
-                                    sample_time_range=[first_stime, last_stime],
-                                    filter_samples_time_range=[filter_stime, filter_etime],
-                                    valid_filtered_sample_perc=good_sample_ratio)
+        To have the target stim move between each grid position
+        at 400 pixels / sec and not expand or contract::

-            for k, v in position_results.items():
-                self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION')
+            targetsequence.display(velocity=400.0)

-            position_results['sample_from_filter_stages'] = sample_msg_data_filtering
+        If the target should jump from one grid position to the next, and then
+        expand to twice the radius over a 0.5 second period::

-            if int(good_sample_ratio*100) == 0:
-                position_results['calculation_status'] = 'FAILED'
-                results['positions_failed_processing'] += 1
-            else:
-                target_x = good_samples_in_period[:]['targ_pos_x']
-                target_y = good_samples_in_period[:]['targ_pos_y']
+            targetsequence.display(
+                expandedscale=2.0,
+                expansionduration=0.50
+            )

-                if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE:
-                    left_x = good_samples_in_period[:]['left_eye_x']
-                    left_y = good_samples_in_period[:]['left_eye_y']
-                    left_error_x = target_x - left_x
-                    left_error_y = target_y - left_y
-                    left_error_xy = np.hypot(left_error_x, left_error_y)
+        To do a similar animation as the previous example, but also have the
+        target contract back to its original size over 0.75 seconds::

-                    right_x = good_samples_in_period[:]['right_eye_x']
-                    right_y = good_samples_in_period[:]['right_eye_y']
-                    right_error_x = target_x - right_x
-                    right_error_y = target_y - right_y
-                    right_error_xy = np.hypot(right_error_x, right_error_y)
+            targetsequence.display(
+                expandedscale=2.0,
+                expansionduration=0.50,
+                contractionduration=0.75
+            )

-                    lr_error = (right_error_xy + left_error_xy) / 2.0
-                    lr_error_max = lr_error.max()
-                    lr_error_min = lr_error.min()
-                    lr_error_mean = lr_error.mean()
-                    lr_error_std = np.std(lr_error)
-                    min_error = min(min_error, lr_error_min)
-                    max_error = max(max_error, lr_error_max)
-                    summed_error += lr_error_mean
-                    point_count += 1.0
-                else:
-                    eye_x = good_samples_in_period[:]['eye_x']
-                    eye_y = good_samples_in_period[:]['eye_y']
-                    error_x = target_x - eye_x
-                    error_y = target_y - eye_y
-                    error_xy = np.hypot(error_x, error_y)
-                    lr_error = error_xy
-                    lr_error_max = lr_error.max()
-                    lr_error_min = lr_error.min()
-                    lr_error_mean = lr_error.mean()
-                    lr_error_std = np.std(lr_error)
-                    min_error = min(min_error, lr_error_min)
-                    max_error = max(max_error, lr_error_max)
-                    summed_error += lr_error_mean
-                    point_count += 1.0
+        When this method returns, the target has been displayed at all
+        positions. Data collected for each position period can be accessed via
+        the targetdata attribute.
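[Editorial note, not part of the patch: the velocity option documented above maps onto simple linear interpolation inside _animateTarget(). Travel time is euclidean distance divided by velocity, and each flip lerps the position by mu in [0, 1]. A worked sketch with made-up endpoints::

    import numpy as np

    frompos = np.array([0.0, 0.0])
    topos = np.array([400.0, 300.0])
    velocity = 400.0  # units / second, as in display(velocity=400.0)

    # hypot(400, 300) / 400 -> 1.25 seconds of travel time.
    duration = np.hypot(*(topos - frompos)) / velocity

    # Per frame, mu is elapsed / duration and position is a linear blend.
    for elapsed in (0.0, 0.5 * duration, duration):
        mu = elapsed / duration
        tpos = frompos * (1.0 - mu) + topos * mu
        print('t=%.3fs pos=(%.1f, %.1f)' % (elapsed, tpos[0], tpos[1]))
]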
+ """ + del self.targetdata[:] + prevpos = None - position_results2 = dict() - position_results2['calculation_status'] = 'PASSED' - position_results2['target_position'] = (target_x[0], target_y[0]) - position_results2['min_error'] = lr_error_min - position_results2['max_error'] = lr_error_max - position_results2['mean_error'] = lr_error_mean - position_results2['stdev_error'] = lr_error_std - for k, v in position_results2.items(): - self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') - position_results[k] = v - results['position_results'].append(position_results) - self.io.sendMessageEvent('Done Target Position Results : {0}'.format(pindex), 'VALIDATION') + io = self.io + io.clearEvents('all') + io.sendMessageEvent('BEGIN_SEQUENCE {0}'.format(len(self.positions.positions)), self.msgcategory) + turn_rec_off = [] + for d in self.storeevents: + if not d.isReportingEvents(): + d.enableEventReporting(True) + turn_rec_off.append(d) - unit_type = self.win.units - if self.results_in_degrees: - unit_type = 'degrees' - mean_error = summed_error / point_count - err_results = dict(reporting_unit_type=unit_type, min_error=min_error, max_error=max_error, - mean_error=mean_error, passed=results['positions_failed_processing'] == 0, - positions_failed_processing=results['positions_failed_processing']) + sleep(0.025) + for pos in self.positions: + self._initTargetData(prevpos, pos) + self._addDeviceEvents() + if self._terminate_requested: + break + self.moveTo(pos, prevpos, **kwargs) + prevpos = pos + self._addDeviceEvents() + if self._terminate_requested: + break - for k, v in err_results.items(): - self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') - results[k] = v + for d in turn_rec_off: + d.enableEventReporting(False) - self.io.sendMessageEvent('Validation Report Complete', 'VALIDATION') + if self._terminate_requested: + io.sendMessageEvent('VALIDATION TERMINATED BY USER', self.msgcategory) + return False - self.validation_results = results - return self.validation_results + io.sendMessageEvent('DONE_SEQUENCE {0}'.format( len(self.positions.positions)), self.msgcategory) + sleep(0.025) + self._addDeviceEvents() + io.clearEvents('all') + return True - def createPlot(self): - """ - Creates a matplotlib figure of validation results. 
- :return: - """ - from matplotlib import pyplot as pl + def _processMessageEvents(self): + self.target_pos_msgs = [] + self.saved_pos_samples = [] + for pd in self.targetdata: + frompos = pd.get('frompos') + topos = pd.get('topos') + events = pd.get('events') - results = self.getValidationResults() - if results is None: - raise RuntimeError("Validation must be run before creating results plot.") + # create a dict of device labels as keys, device events as value + devlabel_events = {} + for k, v in events.items(): + devlabel_events[k.getName()] = v - pixw, pixh = results['display_pix'] + samples = devlabel_events.get('tracker', []) + # remove any eyetracker events that are not samples + samples = [s for s in samples if s.type in (EventConstants.BINOCULAR_EYE_SAMPLE, + EventConstants.MONOCULAR_EYE_SAMPLE)] + self.saved_pos_samples.append(samples) - pl.clf() - fig = pl.gcf() - fig.set_size_inches((pixw * .9) / self.use_dpi, (pixh * .8) / self.use_dpi) - color_list = pl.cm.tab20b(np.linspace(0, 1, (len(results['position_results'])))) - ci = 0 - for position_results in results['position_results']: - pindex = position_results['pos_index'] - if position_results['calculation_status'] == 'FAILED': - # Draw nothing for failed position - pass + self.sample_type = self.saved_pos_samples[0][0].type + if self.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + self.sample_msg_dtype = self.binocular_sample_message_element else: - samples = position_results['sample_from_filter_stages']['used_samples'] - target_x = samples[:]['targ_pos_x'] - target_y = samples[:]['targ_pos_y'] - if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: - gaze_x = (samples[:]['left_eye_x'] + samples[:]['right_eye_x']) / 2.0 - gaze_y = (samples[:]['left_eye_y'] + samples[:]['right_eye_y']) / 2.0 - else: - gaze_x = samples[:]['eye_x'] - gaze_y = samples[:]['eye_y'] + self.sample_msg_dtype = self.monocular_sample_message_element - pl.scatter(target_x[0], target_y[0], s=400, color=color_list[ci]) - pl.scatter(target_x[0], target_y[0], s=300, color=(0.75, 0.75, 0.75)) - pl.text(target_x[0], target_y[0], str(pindex), size=16, color=color_list[ci], horizontalalignment='center', - verticalalignment='center') - pl.scatter(gaze_x, gaze_y, s=40, color=color_list[ci], alpha=0.75) - ci += 1 + messages = devlabel_events.get('experiment', []) + msg_lists = [] + for m in messages: + temp = m.text.strip().split() + msg_type = self.message_types.get(temp[0]) + if msg_type: + current_msg = [m.time, m.category] + if msg_type[1] == ',': + for t in temp: + current_msg.extend(t.split(',')) + else: + current_msg.extend(temp) - if self.results_in_degrees: - l, b = toDeg(self.win, (-pixw / 2,), (-pixh / 2, )) - r, t = toDeg(self.win, (pixw / 2, ), (pixh / 2, )) - else: - l, t, r, b = results['display_bounds'] + for mi, dtype in enumerate(msg_type[2:]): + current_msg[mi + 3] = dtype(current_msg[mi + 3]) - pl.xlim(l, r) - pl.ylim(b, t) - pl.xlabel('Horizontal Position (%s)' % (results['reporting_unit_type'])) - pl.ylabel('Vertical Position (%s)' % (results['reporting_unit_type'])) - pl.title('Validation Accuracy (%s)\nMin: %.4f, Max: %.4f, Mean %.4f' % (results['reporting_unit_type'], - results['min_error'], - results['max_error'], - results['mean_error'])) + msg_lists.append(current_msg) - fig.tight_layout() - return fig + if msg_lists[0][2] == 'NEXT_POS_TRIG': + # handle case where the trigger msg from the previous target + # message was not read until the start of the next pos. 
+ # In which case, move msg to end of previous targ pos msgs + npm = msg_lists.pop(0) + self.target_pos_msgs[-1].append(npm) - def _generateImageName(self): - import datetime - file_name = 'validation_' + datetime.datetime.now().strftime('%d_%m_%Y_%H_%M') + '.png' - if self.save_figure_path: - return normjoin(self.save_figure_path, file_name) - rootScriptPath = os.path.dirname(sys.argv[0]) - return normjoin(rootScriptPath, file_name) + self.target_pos_msgs.append(msg_lists) - def _buildResultScreen(self, replot=False): + for i in range(len(self.target_pos_msgs)): + self.target_pos_msgs[i] = np.asarray(self.target_pos_msgs[i], dtype=object) + + return self.target_pos_msgs + + def getSampleMessageData(self): """ - Build validation results screen. - Currently saves the plot from .createPlot() to disk and the loads that as an image. - :param replot: - :return: + Return a list of numpy ndarrays, each containing joined eye sample + and previous / next experiment message data for the sample's time. """ - if replot or self.imagestim is None: - iname = self._generateImageName() - self.createPlot().savefig(iname, dpi=self.use_dpi) + # preprocess message events + self._processMessageEvents() - text_pos = (0, 0) - text = 'Accuracy Calculation not Possible do to Analysis Error. Press SPACE to continue.' + # inline func to return sample field array based on sample namedtup + def getSampleData(s): + sampledata = [s.time, s.status] + if self.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + sampledata.extend((s.left_gaze_x, s.left_gaze_y, s.left_pupil_measure1, + s.right_gaze_x, s.right_gaze_y, s.right_pupil_measure1)) + return sampledata - if iname: - fig_image = Image.open(iname) + sampledata.extend((s.gaze_x, s.gaze_y, s.pupil_measure1)) + return sampledata - if self.imagestim: - self.imagestim.setImage(fig_image) - else: - self.imagestim = visual.ImageStim(self.win, image=fig_image, units='pix', pos=(0.0, 0.0)) + current_target_pos = -1.0, -1.0 + current_targ_state = 0 + target_pos_samples = [] + for pindex, samples in enumerate(self.saved_pos_samples): + last_msg, messages = self.target_pos_msgs[pindex][0], self.target_pos_msgs[pindex][1:] + samplesforposition = [] + pos_sample_count = len(samples) + si = 0 + for current_msg in messages: + last_msg_time = last_msg[0] + last_msg_type = last_msg[2] + if last_msg_type == 'START_DRAW': + if not current_targ_state & self.TARGET_STATIONARY: + current_targ_state += self.TARGET_STATIONARY + current_targ_state -= current_targ_state & self.TARGET_MOVING + current_targ_state -= current_targ_state & self.TARGET_EXPANDING + current_targ_state -= current_targ_state & self.TARGET_CONTRACTING + elif last_msg_type == 'EXPAND_SIZE': + if not current_targ_state & self.TARGET_EXPANDING: + current_targ_state += self.TARGET_EXPANDING + current_targ_state -= current_targ_state & self.TARGET_CONTRACTING + elif last_msg_type == 'CONTRACT_SIZE': + if not current_targ_state & self.TARGET_CONTRACTING: + current_targ_state += self.TARGET_CONTRACTING + current_targ_state -= current_targ_state & self.TARGET_EXPANDING + elif last_msg_type == 'TARGET_POS': + current_target_pos = float(last_msg[3]), float(last_msg[4]) + current_targ_state -= current_targ_state & self.TARGET_MOVING + if not current_targ_state & self.TARGET_STATIONARY: + current_targ_state += self.TARGET_STATIONARY + elif last_msg_type == 'POS_UPDATE': + current_target_pos = float(last_msg[3]), float(last_msg[4]) + if not current_targ_state & self.TARGET_MOVING: + current_targ_state += self.TARGET_MOVING + 
current_targ_state -= current_targ_state & self.TARGET_STATIONARY
+                elif last_msg_type == 'SYNCTIME':
+                    if not current_targ_state & self.TARGET_STATIONARY:
+                        current_targ_state += self.TARGET_STATIONARY
+                    current_targ_state -= current_targ_state & self.TARGET_MOVING
+                    current_targ_state -= current_targ_state & self.TARGET_EXPANDING
+                    current_targ_state -= current_targ_state & self.TARGET_CONTRACTING
+                    current_target_pos = float(last_msg[6]), float(last_msg[7])

-                text = 'Press SPACE to continue.'
-                text_pos = (0.0, -(self.win.size[1] / 2.0) * .9)
-            else:
-                self.imagestim = None
+                while si < pos_sample_count:
+                    sample = samples[si]
+                    if sample.time >= last_msg_time and sample.time < current_msg[0]:
+                        sarray = [pindex, last_msg_time, last_msg_type,
+                                  current_msg[0], current_msg[2],
+                                  current_target_pos[0], current_target_pos[1],
+                                  current_targ_state]
+                        sarray.extend(getSampleData(sample))
+                        sndarray = np.asarray(tuple(sarray), dtype=self.sample_msg_dtype)
+                        samplesforposition.append(sndarray)
+                        si += 1
+                    elif sample.time >= current_msg[0]:
+                        break
+                    else:
+                        si += 1
+                last_msg = current_msg

-        if self.textstim is None:
-            self.textstim = visual.TextStim(self.win, text=text, pos=text_pos, color=(0, 0, 0), colorSpace='rgb255',
-                                            opacity=1.0, contrast=1.0, units='pix', ori=0.0, height=None,
-                                            antialias=True, bold=False, italic=False, anchorVert='center',
-                                            anchorHoriz='center', wrapWidth=self.win.size[0] * .8)
-        else:
-            self.textstim.setText(text)
-            self.textstim.setPos(text_pos)
+            possamples = np.asanyarray(samplesforposition)
+            target_pos_samples.append(possamples)

-        elif self.imagestim:
-            return True
-        return False
+        # So we now have a list len == number target positions. Each element
+        # of the list is a list of all eye sample / message data for a
+        # target position. Each element of the data list for a single target
+        # position is itself a list that contains combined info about
+        # an eye sample and message info valid for when the sample time was.
+        return np.asanyarray(target_pos_samples, dtype=object)
+
+
+def toPix(win, x, y):
+    """Convert x, y values, given in the window's units,
+    to pixel coordinates.
+    """
+    try:
+        xy = np.zeros((len(x), 2))
+    except TypeError:
+        xy = np.zeros((1, 2))
+
+    xy[:, 0] = x
+    xy[:, 1] = y
+    r = convertToPix(np.asarray((0, 0)), xy, win.units, win)
+    return r[:, 0], r[:, 1]
+
+
+def toDeg(win, x, y):
+    try:
+        xy = np.zeros((len(x), 2))
+    except TypeError:
+        xy = np.zeros((1, 2))
+    xy[:, 0] = x
+    xy[:, 1] = y
+    r = pix2deg(xy, win.monitor, correctFlat=False)
+    return r[:, 0], r[:, 1]
\ No newline at end of file

From 8104cdf996fa30d67bc9d7e8a4f5f0f2ab9cb843 Mon Sep 17 00:00:00 2001
From: Sol Simpson
Date: Thu, 1 Apr 2021 08:33:48 -0300
Subject: [PATCH 13/26] RF: Switching validation results screen to psychopy
 gfx

validation results screen now being drawn using psychopy gfx instead of
matplotlib.
---
 .../coder/iohub/eyetracking/validation.py     |   4 +-
 .../client/eyetracker/validation/procedure.py | 119 +++++++++++-------
 2 files changed, 73 insertions(+), 50 deletions(-)

diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py
index 9ae85a59f76..697ddaa908c 100644
--- a/psychopy/demos/coder/iohub/eyetracking/validation.py
+++ b/psychopy/demos/coder/iohub/eyetracking/validation.py
@@ -38,8 +38,8 @@

     # target_positions: Provide your own list of validation positions,
     # or use the PositionGrid class to generate a set.
- target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), - (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] + target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0),]# (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), + # (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] # Create a validation procedure validation_proc = ValidationProcedure(win, diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index e0d2eb495a3..41b18eb0f40 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -20,7 +20,7 @@ from time import sleep import os import sys -from PIL import Image +from matplotlib import pyplot as pl from collections import OrderedDict from psychopy import visual, core @@ -106,7 +106,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r """ ValidationProcedure is used to check the accuracy of a calibrated eye tracking system. - Once a ValidationProcedure class instance has been created, the run(**kwargs) method + Once a ValidationProcedure class instance has been created, the `.run()` method can be called to start the validation process. The validation process consists of the following stages: @@ -168,7 +168,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r average error is displayed for all target positions. A key press is used to remove the validation results plot, and control is returned to the script that started the validation display. Note that the plot is also - saved as a png file in the same directory as the calling stript. + saved as a png file in the same directory as the calling script. See the validation.py demo in demos.coder.iohub.eyetracker for example usage. @@ -210,6 +210,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r self.accuracy_period_stop = accuracy_period_stop self.show_intro_screen = show_intro_screen self.intro_text = intro_text + self.intro_text_stim = None self.show_results_screen = show_results_screen self.results_in_degrees = results_in_degrees self.save_figure_path = save_figure_path @@ -234,8 +235,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r terminate_key=terminate_key, gaze_cursor_key=toggle_gaze_cursor_key) # Stim for results screen - self.imagestim = None - self.textstim = None + self.results_screen = dict() self.use_dpi = 90 def run(self): @@ -275,24 +275,21 @@ def run(self): def showResultsScreen(self): self._buildResultScreen() - if self.imagestim: - self.imagestim.draw() - self.textstim.draw() return self.win.flip() def showIntroScreen(self): text = self.intro_text + '\nPress SPACE to Start....' 
textpos = (0, 0)
-        if self.textstim:
-            self.textstim.setText(text)
-            self.textstim.setPos(textpos)
+        if self.intro_text_stim:
+            self.intro_text_stim.setText(text)
+            self.intro_text_stim.setPos(textpos)
         else:
-            self.textstim = visual.TextStim(self.win, text=text, pos=textpos, height=30, color=(0, 0, 0),
-                                            colorSpace='rgb255', opacity=1.0, contrast=1.0, units='pix',
-                                            ori=0.0, antialias=True, bold=False, italic=False, anchorHoriz='center',
-                                            anchorVert='center', wrapWidth=self.win.size[0] * .8)
+            self.intro_text_stim = visual.TextStim(self.win, text=text, pos=textpos, height=30, color=(0, 0, 0),
+                                                   colorSpace='rgb255', opacity=1.0, contrast=1.0, units='pix',
+                                                   ori=0.0, antialias=True, bold=False, italic=False, anchorHoriz='center',
+                                                   anchorVert='center', wrapWidth=self.win.size[0] * .8)

-        self.textstim.draw()
+        self.intro_text_stim.draw()
         return self.win.flip()

     def getValidationResults(self):
@@ -473,8 +470,6 @@ def createPlot(self):
         Creates a matplotlib figure of validation results.
         :return:
         """
-        from matplotlib import pyplot as pl
-
         results = self.getValidationResults()
         if results is None:
             raise RuntimeError("Validation must be run before creating results plot.")
@@ -536,45 +531,72 @@ def _generateImageName(self):
         rootScriptPath = os.path.dirname(sys.argv[0])
         return normjoin(rootScriptPath, file_name)

-    def _buildResultScreen(self, replot=False):
+    def _buildResultScreen(self, rebuild=False):
         """
         Build validation results screen.
-        Currently saves the plot from .createPlot() to disk and the loads that as an image.
+
         :param replot:
         :return:
         """
-        if replot or self.imagestim is None:
-            iname = self._generateImageName()
-            self.createPlot().savefig(iname, dpi=self.use_dpi)

-            text_pos = (0, 0)
-            text = 'Accuracy Calculation not Possible due to Analysis Error. Press SPACE to continue.'
+
+        results = self.getValidationResults()

-            if iname:
-                fig_image = Image.open(iname)
+        for tp in self.positions.getPositions():
+            self.targetsequence.target.setPos(tp)
+            self.targetsequence.target.draw()

-            if self.imagestim:
-                self.imagestim.setImage(fig_image)
-            else:
-                self.imagestim = visual.ImageStim(self.win, image=fig_image, units='pix', pos=(0.0, 0.0))
+        title_txt = 'Validation Accuracy\nMin: %.4f, Max: %.4f,' \
+                    ' Mean %.4f (%s units)' % (results['min_error'], results['max_error'],
+                                               results['mean_error'], results['reporting_unit_type'])
+        title_stim = visual.TextStim(self.win, text=title_txt, height=24, pos=(0.0, (self.win.size[1] / 2.0) * .9),
+                                     color=(0, 0, 0), colorSpace='rgb255', units='pix', antialias=True,
+                                     anchorVert='center', anchorHoriz='center', wrapWidth=self.win.size[0] * .8)
+        title_stim.draw()

-                text = 'Press SPACE to continue.'
- text_pos = (0.0, -(self.win.size[1] / 2.0) * .9) - else: - self.imagestim = None + exit_text = visual.TextStim(self.win, text='Press SPACE to continue.', opacity=1.0, units='pix', height=None, + pos=(0.0, -(self.win.size[1] / 2.0) * .9), color=(0, 0, 0), colorSpace='rgb255', + antialias=True, bold=True, anchorVert='center', anchorHoriz='center', + wrapWidth=self.win.size[0] * .8) + exit_text.draw() - if self.textstim is None: - self.textstim = visual.TextStim(self.win, text=text, pos=text_pos, color=(0, 0, 0), colorSpace='rgb255', - opacity=1.0, contrast=1.0, units='pix', ori=0.0, height=None, - antialias=True, bold=False, italic=False, anchorVert='center', - anchorHoriz='center', wrapWidth=self.win.size[0] * .8) + color_list = pl.cm.tab20b(np.linspace(0, 1, (len(results['position_results'])))) + # draw eye samples + ci = 0 + sample_gfx_radius = deg2pix(0.33, self.win.monitor, correctFlat=False) + for position_results in results['position_results']: + pindex = position_results['pos_index'] + color = color_list[ci]*2.0-1.0 + self.results_screen['target_%d_sample' % pindex] = visual.Circle(self.win, radius=sample_gfx_radius, + fillColor=color, lineColor=[1, 1, 1], + lineWidth=1, edges=64, units='pix', + colorSpace='rgb', opacity=0.66, + interpolate=True, autoLog=False) + sample_gfx = self.results_screen['target_%d_sample' % pindex] + + if position_results['calculation_status'] == 'FAILED': + print("TODO: Draw gfx for FAILED validation point.") + pass else: - self.textstim.setText(text) - self.textstim.setPos(text_pos) + samples = position_results['sample_from_filter_stages']['used_samples'] + target_x = samples[:]['targ_pos_x'] + target_y = samples[:]['targ_pos_y'] + if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + gaze_x = (samples[:]['left_eye_x'] + samples[:]['right_eye_x']) / 2.0 + gaze_y = (samples[:]['left_eye_y'] + samples[:]['right_eye_y']) / 2.0 + else: + gaze_x = samples[:]['eye_x'] + gaze_y = samples[:]['eye_y'] - elif self.imagestim: - return True - return False + for i in range(len(gaze_x)): + pix_pos = toPix(self.win, gaze_x[i], gaze_y[i]) + pix_pos = pix_pos[0][0], pix_pos[1][0] + sample_gfx.setPos(pix_pos) + sample_gfx.draw() + #pl.text(target_x[0], target_y[0], str(pindex), size=16, color=color_list[ci], + # horizontalalignment='center', + # verticalalignment='center') + #pl.scatter(gaze_x, gaze_y, s=40, color=color_list[ci], alpha=0.75) + ci += 1 class ValidationTargetRenderer(object): @@ -628,9 +650,10 @@ def __init__(self, win, target, positions, background=None, storeeventsfor=[], t """ ValidationTargetRenderer is an internal class used by `ValidationProcedure`. - psychopy.iohub.Trigger based classes are used to define the criteria used to - start displaying the next target position graphics. By providing a set of - DeviceEventTriggers, complex criteria for target position pacing can be defined. + psychopy.iohub.client.eyetracker.validation.Trigger based classes are used + to define the criteria used to start displaying the next target position graphics. + By providing a set of DeviceEventTriggers, complex criteria for + target position pacing can be defined. iohub devices can be provided in the storeeventsfor keyword argument. Events which occur during each target position presentation period are From d6b7421af80a01bec8b2a4a20294b212c5ca3e79 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Thu, 1 Apr 2021 09:34:13 -0300 Subject: [PATCH 14/26] FF: validation results screen validation results screen now uses psychopy only gfx. 
prints stats for each target location on screen. --- .../coder/iohub/eyetracking/validation.py | 4 +- .../client/eyetracker/validation/procedure.py | 132 ++++++------------ 2 files changed, 43 insertions(+), 93 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py index 697ddaa908c..ea363821ccd 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation.py @@ -38,8 +38,8 @@ # target_positions: Provide your own list of validation positions, # or use the PositionGrid class to generate a set. - target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0),]# (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), - # (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] + target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), + (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] # Create a validation procedure validation_proc = ValidationProcedure(win, diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index 41b18eb0f40..d65a4b96e43 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -235,7 +235,6 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r terminate_key=terminate_key, gaze_cursor_key=toggle_gaze_cursor_key) # Stim for results screen - self.results_screen = dict() self.use_dpi = 90 def run(self): @@ -274,7 +273,7 @@ def run(self): return self.validation_results def showResultsScreen(self): - self._buildResultScreen() + self.drawResultScreen() return self.win.flip() def showIntroScreen(self): @@ -304,6 +303,7 @@ def _createValidationResults(self): """ self.validation_results = None sample_array = self.targetsequence.getSampleMessageData() + target_positions_used = self.targetsequence.positions.getPositions() if self.results_in_degrees: for postdat in sample_array: @@ -331,7 +331,7 @@ def _createValidationResults(self): self.io.sendMessageEvent('Results', 'VALIDATION') results = dict(display_units=self.win.units, display_bounds=self.positions.bounds, display_pix=self.win.size, position_count=len(sample_array), - target_positions=self.targetsequence.positions.getPositions()) + target_positions=target_positions_used) for k, v in results.items(): self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') @@ -380,7 +380,8 @@ def _createValidationResults(self): # do not have missing data used_samples=good_samples_in_period) - position_results = dict(pos_index=pindex, + position_results = dict(index=pindex, + position=target_positions_used[pindex], sample_time_range=[first_stime, last_stime], filter_samples_time_range=[filter_stime, filter_etime], valid_filtered_sample_perc=good_sample_ratio) @@ -390,8 +391,9 @@ def _createValidationResults(self): position_results['sample_from_filter_stages'] = sample_msg_data_filtering + position_results2 = dict() if int(good_sample_ratio * 100) == 0: - position_results['calculation_status'] = 'FAILED' + position_results2['calculation_status'] = 'FAILED' results['positions_failed_processing'] += 1 else: target_x = good_samples_in_period[:]['targ_pos_x'] @@ -435,18 +437,17 @@ def _createValidationResults(self): summed_error += lr_error_mean point_count += 1.0 - position_results2 = dict() position_results2['calculation_status'] = 'PASSED' position_results2['target_position'] = (target_x[0], target_y[0]) 
position_results2['min_error'] = lr_error_min position_results2['max_error'] = lr_error_max position_results2['mean_error'] = lr_error_mean position_results2['stdev_error'] = lr_error_std - for k, v in position_results2.items(): - self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') - position_results[k] = v - results['position_results'].append(position_results) - self.io.sendMessageEvent('Done Target Position Results : {0}'.format(pindex), 'VALIDATION') + for k, v in position_results2.items(): + self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION') + position_results[k] = v + results['position_results'].append(position_results) + self.io.sendMessageEvent('Done Target Position Results : {0}'.format(pindex), 'VALIDATION') unit_type = self.win.units if self.results_in_degrees: @@ -465,64 +466,6 @@ def _createValidationResults(self): self.validation_results = results return self.validation_results - def createPlot(self): - """ - Creates a matplotlib figure of validation results. - :return: - """ - results = self.getValidationResults() - if results is None: - raise RuntimeError("Validation must be run before creating results plot.") - - pixw, pixh = results['display_pix'] - - pl.clf() - fig = pl.gcf() - fig.set_size_inches((pixw * .9) / self.use_dpi, (pixh * .8) / self.use_dpi) - color_list = pl.cm.tab20b(np.linspace(0, 1, (len(results['position_results'])))) - ci = 0 - for position_results in results['position_results']: - pindex = position_results['pos_index'] - if position_results['calculation_status'] == 'FAILED': - # Draw nothing for failed position - pass - else: - samples = position_results['sample_from_filter_stages']['used_samples'] - target_x = samples[:]['targ_pos_x'] - target_y = samples[:]['targ_pos_y'] - if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: - gaze_x = (samples[:]['left_eye_x'] + samples[:]['right_eye_x']) / 2.0 - gaze_y = (samples[:]['left_eye_y'] + samples[:]['right_eye_y']) / 2.0 - else: - gaze_x = samples[:]['eye_x'] - gaze_y = samples[:]['eye_y'] - - pl.scatter(target_x[0], target_y[0], s=400, color=color_list[ci]) - pl.scatter(target_x[0], target_y[0], s=300, color=(0.75, 0.75, 0.75)) - pl.text(target_x[0], target_y[0], str(pindex), size=16, color=color_list[ci], - horizontalalignment='center', - verticalalignment='center') - pl.scatter(gaze_x, gaze_y, s=40, color=color_list[ci], alpha=0.75) - ci += 1 - - if self.results_in_degrees: - l, b = toDeg(self.win, (-pixw / 2,), (-pixh / 2,)) - r, t = toDeg(self.win, (pixw / 2,), (pixh / 2,)) - else: - l, t, r, b = results['display_bounds'] - - pl.xlim(l, r) - pl.ylim(b, t) - pl.xlabel('Horizontal Position (%s)' % (results['reporting_unit_type'])) - pl.ylabel('Vertical Position (%s)' % (results['reporting_unit_type'])) - pl.title('Validation Accuracy (%s)\nMin: %.4f, Max: %.4f, Mean %.4f' % (results['reporting_unit_type'], - results['min_error'], - results['max_error'], - results['mean_error'])) - - fig.tight_layout() - return fig - def _generateImageName(self): import datetime file_name = 'validation_' + datetime.datetime.now().strftime('%d_%m_%Y_%H_%M') + '.png' @@ -531,11 +474,9 @@ def _generateImageName(self): rootScriptPath = os.path.dirname(sys.argv[0]) return normjoin(rootScriptPath, file_name) - def _buildResultScreen(self, rebuild=False): + def drawResultScreen(self): """ - Build validation results screen. - - :param replot: + Draw validation results screen. 
:return: """ @@ -548,13 +489,13 @@ def _buildResultScreen(self, rebuild=False): title_txt = 'Validation Accuracy\nMin: %.4f, Max: %.4f,' \ ' Mean %.4f (%s units)' % (results['min_error'], results['max_error'], results['mean_error'], results['reporting_unit_type']) - title_stim = visual.TextStim(self.win, text=title_txt, height=24, pos=(0.0, (self.win.size[1] / 2.0) * .9), + title_stim = visual.TextStim(self.win, text=title_txt, height=24, pos=(0.0, (self.win.size[1] / 2.0) * .95), color=(0, 0, 0), colorSpace='rgb255', units='pix', antialias=True, anchorVert='center', anchorHoriz='center', wrapWidth=self.win.size[0] * .8) title_stim.draw() exit_text = visual.TextStim(self.win, text='Press SPACE to continue.', opacity=1.0, units='pix', height=None, - pos=(0.0, -(self.win.size[1] / 2.0) * .9), color=(0, 0, 0), colorSpace='rgb255', + pos=(0.0, -(self.win.size[1] / 2.0) * .95), color=(0, 0, 0), colorSpace='rgb255', antialias=True, bold=True, anchorVert='center', anchorHoriz='center', wrapWidth=self.win.size[0] * .8) exit_text.draw() @@ -564,22 +505,22 @@ def _buildResultScreen(self, rebuild=False): ci = 0 sample_gfx_radius = deg2pix(0.33, self.win.monitor, correctFlat=False) for position_results in results['position_results']: - pindex = position_results['pos_index'] color = color_list[ci]*2.0-1.0 - self.results_screen['target_%d_sample' % pindex] = visual.Circle(self.win, radius=sample_gfx_radius, - fillColor=color, lineColor=[1, 1, 1], - lineWidth=1, edges=64, units='pix', - colorSpace='rgb', opacity=0.66, - interpolate=True, autoLog=False) - sample_gfx = self.results_screen['target_%d_sample' % pindex] + sample_gfx = visual.Circle(self.win, radius=sample_gfx_radius, fillColor=color, lineColor=[1, 1, 1], + lineWidth=1, edges=64, units='pix', colorSpace='rgb', opacity=0.66, + interpolate=True, autoLog=False) if position_results['calculation_status'] == 'FAILED': - print("TODO: Draw gfx for FAILED validation point.") - pass + position_txt = "Failed" + txt_bold=True + position_txt_color = "red" + target_x, target_y = position_results['position'] + text_pix_pos = toPix(self.win, target_x, target_y) + text_pix_pos = text_pix_pos[0][0], text_pix_pos[1][0] else: samples = position_results['sample_from_filter_stages']['used_samples'] - target_x = samples[:]['targ_pos_x'] - target_y = samples[:]['targ_pos_y'] + target_x = samples[:]['targ_pos_x'][0] + target_y = samples[:]['targ_pos_y'][0] if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: gaze_x = (samples[:]['left_eye_x'] + samples[:]['right_eye_x']) / 2.0 gaze_y = (samples[:]['left_eye_y'] + samples[:]['right_eye_y']) / 2.0 @@ -592,11 +533,20 @@ def _buildResultScreen(self, rebuild=False): pix_pos = pix_pos[0][0], pix_pos[1][0] sample_gfx.setPos(pix_pos) sample_gfx.draw() - #pl.text(target_x[0], target_y[0], str(pindex), size=16, color=color_list[ci], - # horizontalalignment='center', - # verticalalignment='center') - #pl.scatter(gaze_x, gaze_y, s=40, color=color_list[ci], alpha=0.75) - ci += 1 + txt_bold = False + position_txt = "Gaze Error:\nMin: %.4f\nMax: %.4f\nAvg: %.4f\nStdev: %.4f"%(position_results['min_error'], + position_results['max_error'], + position_results['mean_error'], + position_results['stdev_error']) + position_txt_color = "green" + text_pix_pos = toPix(self.win, target_x, target_y) + text_pix_pos = text_pix_pos[0][0], text_pix_pos[1][0] + + target_text_stim = visual.TextStim(self.win, text=position_txt, units='pix', pos=text_pix_pos, + height=21, color=position_txt_color, antialias=True, bold=txt_bold, + 
anchorVert='center', anchorHoriz='center') + target_text_stim.draw() + ci += 1 class ValidationTargetRenderer(object): From 2d5609e515fc0d9bcf60f52e9a998dfce4246e29 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Thu, 1 Apr 2021 09:44:45 -0300 Subject: [PATCH 15/26] RF: some pep8 cleanup --- .../client/eyetracker/validation/posgrid.py | 6 ++-- .../client/eyetracker/validation/procedure.py | 32 ++++++++++--------- .../client/eyetracker/validation/trigger.py | 11 ++++--- 3 files changed, 26 insertions(+), 23 deletions(-) diff --git a/psychopy/iohub/client/eyetracker/validation/posgrid.py b/psychopy/iohub/client/eyetracker/validation/posgrid.py index b55fdd6fd53..e60a19c5544 100644 --- a/psychopy/iohub/client/eyetracker/validation/posgrid.py +++ b/psychopy/iohub/client/eyetracker/validation/posgrid.py @@ -1,6 +1,7 @@ import numpy as np from psychopy.iohub.client import ioHubConnection + class PositionGrid(object): def __init__(self, bounds=None, @@ -88,7 +89,7 @@ def __init__(self, if self.bounds is None: self.bounds = ioHubConnection.getActiveConnection().devices.display.getCoordBounds() - winSize = self.bounds[2]-self.bounds[0], self.bounds[3]-self.bounds[1] + winSize = self.bounds[2] - self.bounds[0], self.bounds[3] - self.bounds[1] self.firstposindex = firstposindex self.repeatfirstpos = repeatFirstPos @@ -117,7 +118,7 @@ def __init__(self, if posList: # User has provided the target positions, use posList to set # self.positions as array of x,y pairs. - if (len(posList) == 2 and len(posList[0]) != 2 and len(posList[0]) == len(posList[1])): + if len(posList) == 2 and len(posList[0]) != 2 and len(posList[0]) == len(posList[1]): # positions were provided in ((x1,x2,..,xn),(y1,y2,..,yn)) # format self.positions = np.column_stack((posList[0], posList[1])) @@ -158,7 +159,6 @@ def __init__(self, else: raise ValueError('PositionGrid bottomMargin kwarg must be < winSize[1]') - if horzScale: if 0.0 < horzScale <= 1.0: xmin += (1.0 - horzScale) / 2.0 diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index d65a4b96e43..8069c57ca2a 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -285,8 +285,9 @@ def showIntroScreen(self): else: self.intro_text_stim = visual.TextStim(self.win, text=text, pos=textpos, height=30, color=(0, 0, 0), colorSpace='rgb255', opacity=1.0, contrast=1.0, units='pix', - ori=0.0, antialias=True, bold=False, italic=False, anchorHoriz='center', - anchorVert='center', wrapWidth=self.win.size[0] * .8) + ori=0.0, antialias=True, bold=False, italic=False, + anchorHoriz='center', anchorVert='center', + wrapWidth=self.win.size[0] * .8) self.intro_text_stim.draw() return self.win.flip() @@ -309,7 +310,7 @@ def _createValidationResults(self): for postdat in sample_array: postdat['targ_pos_x'], postdat['targ_pos_y'] = toDeg(self.win, *toPix(self.win, postdat['targ_pos_x'], - postdat['targ_pos_y'])) + postdat['targ_pos_y'])) if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: postdat['left_eye_x'], postdat['left_eye_y'] = toDeg(self.win, @@ -505,14 +506,14 @@ def drawResultScreen(self): ci = 0 sample_gfx_radius = deg2pix(0.33, self.win.monitor, correctFlat=False) for position_results in results['position_results']: - color = color_list[ci]*2.0-1.0 + color = color_list[ci] * 2.0 - 1.0 sample_gfx = visual.Circle(self.win, radius=sample_gfx_radius, fillColor=color, lineColor=[1, 1, 1], lineWidth=1, edges=64, 
units='pix', colorSpace='rgb', opacity=0.66, interpolate=True, autoLog=False) if position_results['calculation_status'] == 'FAILED': position_txt = "Failed" - txt_bold=True + txt_bold = True position_txt_color = "red" target_x, target_y = position_results['position'] text_pix_pos = toPix(self.win, target_x, target_y) @@ -534,10 +535,11 @@ def drawResultScreen(self): sample_gfx.setPos(pix_pos) sample_gfx.draw() txt_bold = False - position_txt = "Gaze Error:\nMin: %.4f\nMax: %.4f\nAvg: %.4f\nStdev: %.4f"%(position_results['min_error'], - position_results['max_error'], - position_results['mean_error'], - position_results['stdev_error']) + position_txt = "Gaze Error:\nMin: %.4f\nMax: %.4f\n" \ + "Avg: %.4f\nStdev: %.4f" % (position_results['min_error'], + position_results['max_error'], + position_results['mean_error'], + position_results['stdev_error']) position_txt_color = "green" text_pix_pos = toPix(self.win, target_x, target_y) text_pix_pos = text_pix_pos[0][0], text_pix_pos[1][0] @@ -785,8 +787,10 @@ def _hasTriggerFired(self, **kwargs): for trig in self.triggers: if trig.triggered(**kwargs): triggered = trig + self._addDeviceEvents(trig.clearEventHistory(True)) + if triggered: break - self._addDeviceEvents(trig.clearEventHistory(True)) + if triggered: # by default, assume it was a timer trigger,so use 255 as 'event type' event_type_id = 255 @@ -940,7 +944,7 @@ def display(self, **kwargs): io.sendMessageEvent('VALIDATION TERMINATED BY USER', self.msgcategory) return False - io.sendMessageEvent('DONE_SEQUENCE {0}'.format( len(self.positions.positions)), self.msgcategory) + io.sendMessageEvent('DONE_SEQUENCE {0}'.format(len(self.positions.positions)), self.msgcategory) sleep(0.025) self._addDeviceEvents() io.clearEvents('all') @@ -950,8 +954,6 @@ def _processMessageEvents(self): self.target_pos_msgs = [] self.saved_pos_samples = [] for pd in self.targetdata: - frompos = pd.get('frompos') - topos = pd.get('topos') events = pd.get('events') # create a dict of device labels as keys, device events as value @@ -1067,7 +1069,7 @@ def getSampleData(s): while si < pos_sample_count: sample = samples[si] - if sample.time >= last_msg_time and sample.time < current_msg[0]: + if last_msg_time <= sample.time < current_msg[0]: sarray = [pindex, last_msg_time, last_msg_type, current_msg[0], current_msg[2], current_target_pos[0], current_target_pos[1], @@ -1116,4 +1118,4 @@ def toDeg(win, x, y): xy[:, 0] = x xy[:, 1] = y r = pix2deg(xy, win.monitor, correctFlat=False) - return r[:, 0], r[:, 1] \ No newline at end of file + return r[:, 0], r[:, 1] diff --git a/psychopy/iohub/client/eyetracker/validation/trigger.py b/psychopy/iohub/client/eyetracker/validation/trigger.py index a2a845cffef..3880a10091a 100644 --- a/psychopy/iohub/client/eyetracker/validation/trigger.py +++ b/psychopy/iohub/client/eyetracker/validation/trigger.py @@ -49,18 +49,15 @@ def clearEventHistory(cls, returncopy=False): return {} @classmethod - def getTriggersFrom(cls,triggers): + def getTriggersFrom(cls, triggers): """ Returns a list of Trigger instances generated based on the contents of the input triggers. - :param io: :param triggers: :return: """ # Handle different valid trigger object types - trig_list = () - io = cls.io if isinstance(triggers, (list, tuple)): # Support is provided for a list of Trigger objects or a list of # strings. 
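Triggers compose freely: the first one that fires advances the target. A minimal sketch of custom pacing, built only from the constructor signatures visible in this file (assumes iohub is already running so the triggers can bind to the active connection):

    from psychopy.iohub.client.eyetracker.validation import KeyboardTrigger, TimeTrigger

    # Advance on a 'space' key press, or automatically 2.5 sec after the
    # target reaches a position, whichever trigger fires first.
    triggers = [KeyboardTrigger(' ', on_press=True),
                TimeTrigger(start_time=None, delay=2.5)]
    # Passed as ValidationProcedure(..., triggers=triggers); getTriggersFrom()
    # normalizes strings or Trigger instances into this same list form.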
@@ -94,6 +91,7 @@ def getTriggersFrom(cls,triggers): raise ValueError('The triggers kwarg could not be understood as a valid triggers input value.') return trig_list + class TimeTrigger(Trigger): """ A TimeTrigger associates a delay from the provided start_time @@ -101,6 +99,7 @@ class TimeTrigger(Trigger): start_time and delay can be sec.msec float, or a callable object (that takes no parameters). """ + def __init__(self, start_time, delay, repeat_count=0, trigger_function=lambda a, b, c: True, user_kwargs={}): Trigger.io = ioHubConnection.getActiveConnection() Trigger.__init__(self, trigger_function, user_kwargs, repeat_count) @@ -112,6 +111,7 @@ def startTimeFunc(): if self._start_time is None: self._start_time = getTime() return self._start_time + self.startTime = startTimeFunc else: self.startTime = start_time @@ -120,6 +120,7 @@ def startTimeFunc(): if not callable(delay): def delayFunc(): return delay + self.delay = delayFunc def triggered(self, **kwargs): @@ -157,6 +158,7 @@ class DeviceEventTrigger(Trigger): returns True. """ _lastEventsByDevice = dict() + def __init__(self, device, event_type, event_attribute_conditions={}, repeat_count=-1, trigger_function=lambda a, b, c: True, user_kwargs={}): Trigger.io = ioHubConnection.getActiveConnection() @@ -231,4 +233,3 @@ def __init__(self, key, on_press=False): etype = EventConstants.KEYBOARD_RELEASE DeviceEventTrigger.__init__(self, self.io.devices.keyboard, event_type=etype, event_attribute_conditions={'key': key}) - From 5439ab3a70a7ef69a2a026dc743ea9d63e7647c6 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Thu, 1 Apr 2021 09:48:37 -0300 Subject: [PATCH 16/26] DOC: Added copyright msg to new iohub files --- psychopy/iohub/client/eyetracker/__init__.py | 4 ++++ psychopy/iohub/client/eyetracker/validation/__init__.py | 5 +++++ psychopy/iohub/client/eyetracker/validation/posgrid.py | 5 +++++ psychopy/iohub/client/eyetracker/validation/procedure.py | 3 +++ psychopy/iohub/client/eyetracker/validation/trigger.py | 5 +++++ 5 files changed, 22 insertions(+) diff --git a/psychopy/iohub/client/eyetracker/__init__.py b/psychopy/iohub/client/eyetracker/__init__.py index e69de29bb2d..41998c05094 100644 --- a/psychopy/iohub/client/eyetracker/__init__.py +++ b/psychopy/iohub/client/eyetracker/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). \ No newline at end of file diff --git a/psychopy/iohub/client/eyetracker/validation/__init__.py b/psychopy/iohub/client/eyetracker/validation/__init__.py index 101bc600ad5..05b0145c570 100644 --- a/psychopy/iohub/client/eyetracker/validation/__init__.py +++ b/psychopy/iohub/client/eyetracker/validation/__init__.py @@ -1,3 +1,8 @@ +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). 
+ from .posgrid import PositionGrid from .trigger import Trigger, KeyboardTrigger, DeviceEventTrigger, TimeTrigger from .procedure import TargetStim, ValidationProcedure \ No newline at end of file diff --git a/psychopy/iohub/client/eyetracker/validation/posgrid.py b/psychopy/iohub/client/eyetracker/validation/posgrid.py index e60a19c5544..c294c2212b0 100644 --- a/psychopy/iohub/client/eyetracker/validation/posgrid.py +++ b/psychopy/iohub/client/eyetracker/validation/posgrid.py @@ -1,3 +1,8 @@ +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). + import numpy as np from psychopy.iohub.client import ioHubConnection diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index 8069c57ca2a..72a1f165c91 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -1,4 +1,7 @@ # -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). """ Eye Tracker Validation procedure using the ioHub common eye tracker interface. diff --git a/psychopy/iohub/client/eyetracker/validation/trigger.py b/psychopy/iohub/client/eyetracker/validation/trigger.py index 3880a10091a..1a1483c5cf8 100644 --- a/psychopy/iohub/client/eyetracker/validation/trigger.py +++ b/psychopy/iohub/client/eyetracker/validation/trigger.py @@ -1,3 +1,8 @@ +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). + from psychopy import core from psychopy.iohub.constants import EventConstants from psychopy.iohub.client import ioHubConnection From f5566396372bfe0e20890b0646a185faee44a43d Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Thu, 1 Apr 2021 11:08:52 -0300 Subject: [PATCH 17/26] FF: Added save_results_screen option to ValidationProcedure If true, saves results screen to .png image in script directory. show_results_screen must also be True for it to work. --- .../demos/coder/iohub/eyetracking/validation.py | 1 + .../client/eyetracker/validation/procedure.py | 16 ++++++++++------ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py index ea363821ccd..d00115fd89a 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation.py @@ -54,6 +54,7 @@ show_intro_screen=True, intro_text='Validation procedure is now going to be performed.', show_results_screen=True, results_in_degrees=False, + save_results_screen=True, toggle_gaze_cursor_key='g', terminate_key='escape') # Run the validation procedure. run() does not return until the validation is complete. 
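With save_results_screen=True, the results screen shown by win.flip() is captured and written to a timestamped .png in the calling script's directory (see showResultsScreen() and _generateImageName() in the procedure.py diff that follows). A rough standalone equivalent of that capture step, assuming win is the validation window:

    import datetime, os, sys

    file_name = 'validation_' + datetime.datetime.now().strftime('%d_%m_%Y_%H_%M') + '.png'
    save_path = os.path.join(os.path.dirname(sys.argv[0]), file_name)
    win.getMovieFrame()             # grab the frame currently on screen
    win.saveMovieFrames(save_path)  # write the captured frame to disk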
diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index 72a1f165c91..47753104dbb 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -104,7 +104,7 @@ class ValidationProcedure(object): def __init__(self, win=None, target=None, positions=None, target_animation={}, randomize_positions=True, background=None, triggers=None, storeeventsfor=None, accuracy_period_start=0.350, accuracy_period_stop=.050, show_intro_screen=True, intro_text='Ready to Start Validation Procedure.', - show_results_screen=True, results_in_degrees=False, save_figure_path=None, + show_results_screen=True, results_in_degrees=False, save_results_screen=False, terminate_key="escape", toggle_gaze_cursor_key="g"): """ ValidationProcedure is used to check the accuracy of a calibrated eye tracking system. @@ -189,7 +189,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r :param intro_text: :param show_results_screen: :param results_in_degrees: - :param save_figure_path: + :param save_results_screen: :param terminate_key: :param toggle_gaze_cursor_key: """ @@ -216,7 +216,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r self.intro_text_stim = None self.show_results_screen = show_results_screen self.results_in_degrees = results_in_degrees - self.save_figure_path = save_figure_path + self.save_results_screen = save_results_screen self.validation_results = None if storeeventsfor is None: storeeventsfor = [self.io.devices.keyboard, @@ -277,7 +277,11 @@ def run(self): def showResultsScreen(self): self.drawResultScreen() - return self.win.flip() + ftime = self.win.flip() + if self.save_results_screen: + self.win.getMovieFrame() + self.win.saveMovieFrames(self._generateImageName()) + return ftime def showIntroScreen(self): text = self.intro_text + '\nPress SPACE to Start....' @@ -473,8 +477,8 @@ def _createValidationResults(self): def _generateImageName(self): import datetime file_name = 'validation_' + datetime.datetime.now().strftime('%d_%m_%Y_%H_%M') + '.png' - if self.save_figure_path: - return normjoin(self.save_figure_path, file_name) + #if self.save_results_screen: + # return normjoin(self.save_results_screen, file_name) rootScriptPath = os.path.dirname(sys.argv[0]) return normjoin(rootScriptPath, file_name) From 61c5c07fe7de3eacdc0143a41a58c249e10473bf Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Fri, 2 Apr 2021 08:11:01 -0300 Subject: [PATCH 18/26] FF: validation results_in_degrees working again. 
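With results_in_degrees=True, gaze and target positions are reported in visual degrees rather than window units; the conversion goes window units -> pixels -> degrees through the Monitor attached to the PsychoPy window. A sketch of what the toDeg() helper in procedure.py does (pix2deg comes from psychopy.tools.monitorunittools):

    import numpy as np
    from psychopy.tools.monitorunittools import pix2deg

    def to_degrees(win, x_pix, y_pix):
        # Convert pixel coordinates to visual degrees using the window's
        # Monitor calibration; mirrors toDeg() in procedure.py.
        xy = np.column_stack((np.atleast_1d(x_pix), np.atleast_1d(y_pix)))
        r = pix2deg(xy, win.monitor, correctFlat=False)
        return r[:, 0], r[:, 1]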
--- .../coder/iohub/eyetracking/validation.py | 11 +- psychopy/iohub/changes.txt | 6 +- .../client/eyetracker/validation/procedure.py | 266 ++++++++++-------- 3 files changed, 160 insertions(+), 123 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py index d00115fd89a..110e8ca0672 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation.py @@ -50,12 +50,15 @@ expandedscale=3.0, expansionduration=0.2, contractionduration=0.4), - accuracy_period_start=0.550, accuracy_period_stop=.150, + accuracy_period_start=0.550, + accuracy_period_stop=.150, show_intro_screen=True, - intro_text='Validation procedure is now going to be performed.', - show_results_screen=True, results_in_degrees=False, + intro_text='Eye Tracker Validation Procedure.', + show_results_screen=True, + results_in_degrees=True, save_results_screen=True, - toggle_gaze_cursor_key='g', terminate_key='escape') + toggle_gaze_cursor_key='g', + terminate_key='escape') # Run the validation procedure. run() does not return until the validation is complete. validation_results = validation_proc.run() diff --git a/psychopy/iohub/changes.txt b/psychopy/iohub/changes.txt index 18077388232..4a4c05aa74e 100644 --- a/psychopy/iohub/changes.txt +++ b/psychopy/iohub/changes.txt @@ -20,5 +20,7 @@ Changes made to iohub for 2021.2 Release - Removed iohub/devices/display/unit_conversions.py. Moving to psychopy monitorutil functions. - launchHubServer now accepts a psychopy window using the window kwarg. iohub display info is updated using window information like .monitor, .colorSpace, .units, .screen -- If psychopy window (with monitor config) is passed to launchHubServer, - user can forget about need for iohub Display config. \ No newline at end of file +- If psychopy window (with monitor config) is passed to launchHubServer, no need for iohub Display config. +- Added common eye tracker interface validation procedure. Coder Demo: demos/coder/iohub/eyetracking/validation.py + - TODO: Retest all eye trackers +- Updated psychopy.iohub API docs. \ No newline at end of file diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index 47753104dbb..0409422baf1 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -6,15 +6,13 @@ Eye Tracker Validation procedure using the ioHub common eye tracker interface. To use the validation process from within a Coder script: -* Create a target stim, using TargetStim, or any stim class that has a `.setPos()`, `setRadius()`, and `.draw()` - method. -* Create a list of target positions to use during validation. Use PositionGrid class to help create - a target position list. -* Create a ValidationProcedure class instance, providing the target stim and position list - and other arguments to define details of the validation procedure. +* Create a target stim, using TargetStim, or any stim class that has a `.setPos()`, `setRadius()`, and `.draw()` method. +* Create a list of validation target positions. Use the `PositionGrid` class to help create a target position list. +* Create a ValidationProcedure class instance, providing the target stim and position list and other arguments + to define details of the validation procedure. * Use `ValidationProcedure.run()` to perform the validation routine. 
-* Use `ValidationProcedure.getValidationResults()` to access information about each target - position displayed and the events collected during the each target validation period. +* Use `ValidationProcedure.getValidationResults()` to access information about each target position displayed and + the events collected during the each target validation period. See demos/coder/iohub/eyetracking/validation.py for a complete example. """ @@ -44,7 +42,7 @@ def __init__(self, win, radius=None, fillcolor=None, edgecolor=None, edgewidth=N """ TargetStim is a 'doughnut' style target graphic used during the validation procedure. - :param win: Window being sued for validation. + :param win: Window being used for validation. :param radius: The outer radius of the target. :param fillcolor: The color used to fill the target body. :param edgecolor: The color for the edge around the target. @@ -71,19 +69,21 @@ def __init__(self, win, radius=None, fillcolor=None, edgecolor=None, edgewidth=N colorSpace=colorspace, opacity=opacity, contrast=contrast, autoLog=False) self.stim.append(centerdot) - def setRadius(self, r): - """ - Update the radius of the target stim. - """ - self.stim[0].radius = r - def setPos(self, pos): """ - Set the center position of the target stim. + Set the center position of the target stim. Used during validation procedure to + change target position. """ for s in self.stim: s.setPos(pos) + def setRadius(self, r): + """ + Update the radius of the target stim. (Optionally) used during validation procedure to + expand / contract the target stim. + """ + self.stim[0].radius = r + def draw(self): """ Draw the Target stim. @@ -95,103 +95,93 @@ def contains(self, p): """ Is point p contained within the Target Stim? :param p: x, y position in stim units - :return: bool + :return: bool: True: p is within the stim """ return self.stim[0].contains(p) class ValidationProcedure(object): def __init__(self, win=None, target=None, positions=None, target_animation={}, randomize_positions=True, - background=None, triggers=None, storeeventsfor=None, accuracy_period_start=0.350, - accuracy_period_stop=.050, show_intro_screen=True, intro_text='Ready to Start Validation Procedure.', + background=None, triggers=None, storeeventsfor=None, accuracy_period_start=0.550, + accuracy_period_stop=.150, show_intro_screen=True, intro_text='Ready to Start Validation Procedure.', show_results_screen=True, results_in_degrees=False, save_results_screen=False, terminate_key="escape", toggle_gaze_cursor_key="g"): """ - ValidationProcedure is used to check the accuracy of a calibrated eye tracking system. - - Once a ValidationProcedure class instance has been created, the `.run()` method - can be called to start the validation process. - - The validation process consists of the following stages: - - 1) Display an Introduction / Instruction screen. A key press is used to - start target presentation. - 2) The validation target presentation sequence. Based on the Target and - PositionGrid objects provided when the ValidationProcedure was created, - a series of target positions are displayed. The progression from one - target position to the next is controlled by the triggers specified. - The target can simply jump from one position to the next, or optional - linear motion settings can be used to have the target move across the - screen from one point to the next. The Target graphic itself can also - be configured to expand or contract once it has reached a location - defined in the position grid. 
- 3) During stage 2), data is collected from the devices being monitored by - iohub. Specifically eye tracker samples and experiment messages are - collected. - 4) The data collected during the validation target sequence is used to - calculate accuracy information for each target position presented. - The raw data as well as the computed accuracy data is available via the - ValidationProcedure class. Calculated measures are provided separately - for each target position and include: - - a) An array of the samples used for the accuracy calculation. The - samples used are selected using the following criteria: - i) Only samples where the target was stationary and - not expanding or contracting are selected. - - ii) Samples are selected that fall between: + ValidationProcedure is used to calculate the gaze accuracy of a calibrated eye tracking system. - start_time_filter = last_sample_time - accuracy_period_start + Once a ValidationProcedure class instance has been created, the `.run()` method is called to actually start + the validation process, which consists of the following steps: - and + 1) (Optionally) Display an Introduction screen. A 'space' key press is used to start target presentation. + 2) Displaying the validation target at each position being validated. Target progression from one + position to the next is controlled by the specified `triggers`, defaulting to a 'space' key press. + The target graphics can simply jump from one position to the next, or optional target_animation settings + can be used to have the target move across the screen from one point to the next and / or expand / contract + at each target location. + 3) (Optionally) Display a Results screen. The Results screen shows each target position, the position of + each sample used for the accuracy calculation, and some validation result statistics. - end_time_filter = last_sample_time - accuracy_period_end + Data collected during the validation target sequence is used to calculate accuracy information + for each target position presented. The raw data as well as the computed accuracy stats is + available via the `.getValidationResults()` method. - Therefore, the duration of the selected sample period is: + To make the validation output consistent across iohub common eye tracker implementations, validation is + performed on monocular eye data. If binocular eye samples are being recorded, the average of the + left and right eye positions is used for each gaze sample. - selection_period_dur = end_time_filter - start_time_filter + Example: - iii) Sample that contain missing / invalid position data - are then removed, providing the final set of samples - used for accuracy calculations. The min, max, and mean - values from each set of selected samples is calculated. + # Create a *full screen* PsychoPy Window with a valid PsychoPy Monitor file. + win = visual.Window((1920, 1080), fullscr=True, allowGUI=False, monitor='a_VALID_monitor') - b) The x and y error of each samples gaze position relative to the - current target position. This data is in the same units as is - used by the Target instance. Computations are done for each eye - being recorded. The values are signed floats. + # Start the ioHub process. + iohub_config = dict(experiment_code='validation_demo', session_code='default_session') + iohub_config['eyetracker.hw.mouse.EyeTracker'] = dict(name='tracker') + tracker = io.devices.tracker - c) The xy distance error from the from each eye's gaze position to - the target position. 
This is also calculated as an average of - both eyes when binocular data is available. The data is unsigned, - providing the absolute distance from gaze to target positions + # Run eyetracker calibration + r = tracker.runSetupProcedure() - 5) A 2D plot is created displaying each target position and the position of - each sample used for the accuracy calculation. The minimum, maximum, and - average error is displayed for all target positions. A key press is used - to remove the validation results plot, and control is returned to the - script that started the validation display. Note that the plot is also - saved as a png file in the same directory as the calling script. + # Create a validation target. Use any stim that has `.setPos()`, `.setRadius()`, and `.draw()` methods. + # iohub.client.eyetracker.validation.TargetStim provides a standard doughnut style target. + target_stim = TargetStim(win, radius=0.025, fillcolor=[.5, .5, .5], edgecolor=[-1, -1, -1], edgewidth=2, + dotcolor=[1, -1, -1], dotradius=0.005, colorspace='rgb') - See the validation.py demo in demos.coder.iohub.eyetracker for example usage. + # target_positions: 9 point calibration + target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), + (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] - :param win: - :param target: - :param positions: + # Create the validation procedure + validation_proc = ValidationProcedure(win, target=target_stim, positions=target_positions, + randomize_positions=False, + show_intro_screen=True, + intro_text='Eye Tracker Validation', + show_results_screen=True, + save_results_screen=True) + + # Run the validation procedure. Method does not return until the validation is complete. + validation_results = validation_proc.run() + + + See the validation.py demo in demos.coder.iohub.eyetracking for a more complete example. + + :param win: PsychoPy window to use for validation. Must be full screen. + :param target: Stimulus to use as validation target. If None, default `TargetStim` is used. + :param positions: Positions to validate. Provide list of x,y pairs, or use a `PositionGrid` class. :param target_animation: - :param randomize_positions: - :param background: - :param triggers: - :param storeeventsfor: - :param accuracy_period_start: - :param accuracy_period_stop: - :param show_intro_screen: - :param intro_text: - :param show_results_screen: - :param results_in_degrees: - :param save_results_screen: - :param terminate_key: - :param toggle_gaze_cursor_key: + :param randomize_positions: bool: Randomize target positions before presentation. + :param background: color: background color of validation screen. + :param show_intro_screen: bool: Display a validation procedure Introduction screen. + :param intro_text: Introduction screen text. + :param show_results_screen: bool: Display a validation procedure Results screen. + :param results_in_degrees: bool: Convert results to visual degrees. + :param save_results_screen: bool: Save results screen as image. + :param terminate_key: Key that will end the validation procedure. Default is 'escape'. + :param toggle_gaze_cursor_key: Key to toggle gaze cursor visibility (hidden to start). Default is key is 'g'. + :param accuracy_period_start: Time prior to target trigger to use as start of period for valid samples. + :param accuracy_period_stop: Time prior to target trigger to use as end of period for valid samples. + :param triggers: Target progression triggers. Default is 'space' key press. 
+ :param storeeventsfor: iohub devices that events should be stored for. """ self.terminate_key = terminate_key self.toggle_gaze_cursor_key = toggle_gaze_cursor_key @@ -237,8 +227,6 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r triggers=triggers, storeeventsfor=storeeventsfor, terminate_key=terminate_key, gaze_cursor_key=toggle_gaze_cursor_key) - # Stim for results screen - self.use_dpi = 90 def run(self): """ @@ -268,11 +256,19 @@ def run(self): self._createValidationResults() if self.show_results_screen: - if self.showResultsScreen() is not None: - if self.terminate_key and self.terminate_key in keyboard.waitForPresses(keys=[' ', self.terminate_key]): + self.showResultsScreen() + kb_presses = keyboard.waitForPresses(keys=[' ', self.terminate_key, self.targetsequence.gaze_cursor_key]) + while ' ' not in kb_presses: + if self.targetsequence.gaze_cursor_key in kb_presses: + self.targetsequence.display_gaze = not self.targetsequence.display_gaze + self.showResultsScreen() + if self.terminate_key in kb_presses: print("Escape key pressed. Exiting validation") - self.validation_results = None - return + break + kb_presses = keyboard.waitForPresses(keys=[' ', + self.terminate_key, + self.targetsequence.gaze_cursor_key]) + return self.validation_results def showResultsScreen(self): @@ -300,6 +296,38 @@ def showIntroScreen(self): return self.win.flip() def getValidationResults(self): + """ + Return the validation results dict for the last validation run. If a validation as not yet been run(), + None is returned. Validation results are provided separately for each target position and include: + + a) An array of the samples used for the accuracy calculation. The samples used are selected + using the following criteria: + i) Only samples where the target was stationary and not expanding or contracting are selected. + ii) Samples are selected that fall between: + + start_time_filter = last_sample_time - accuracy_period_start + + and + + end_time_filter = last_sample_time - accuracy_period_end + + Therefore, the duration of the selected sample period is: + + selection_period_dur = end_time_filter - start_time_filter + + iii) Sample that contain missing / invalid position data are removed, providing the + final set of samples used for accuracy calculations. The min, max, and mean values + from each set of selected samples is calculated. + + b) The x and y error of sampled gaze position relative to the current target position. + This data is in the same units as is used by the validation window. + + c) The xy distance error from the from each eye's gaze position to the target position. + This is also calculated as an average of both eyes when binocular data is available. + The data is unsigned, providing the absolute distance from gaze to target positions + + :return: validation results dict. 
+ """ return self.validation_results def _createValidationResults(self): @@ -389,7 +417,7 @@ def _createValidationResults(self): used_samples=good_samples_in_period) position_results = dict(index=pindex, - position=target_positions_used[pindex], + target_position=target_positions_used[pindex], sample_time_range=[first_stime, last_stime], filter_samples_time_range=[filter_stime, filter_etime], valid_filtered_sample_perc=good_sample_ratio) @@ -446,7 +474,6 @@ def _createValidationResults(self): point_count += 1.0 position_results2['calculation_status'] = 'PASSED' - position_results2['target_position'] = (target_x[0], target_y[0]) position_results2['min_error'] = lr_error_min position_results2['max_error'] = lr_error_max position_results2['mean_error'] = lr_error_mean @@ -459,7 +486,7 @@ def _createValidationResults(self): unit_type = self.win.units if self.results_in_degrees: - unit_type = 'degrees' + unit_type = 'degree' mean_error = summed_error / point_count err_results = dict(reporting_unit_type=unit_type, min_error=min_error, max_error=max_error, mean_error=mean_error, passed=results['positions_failed_processing'] == 0, @@ -494,7 +521,7 @@ def drawResultScreen(self): self.targetsequence.target.setPos(tp) self.targetsequence.target.draw() - title_txt = 'Validation Accuracy\nMin: %.4f, Max: %.4f,' \ + title_txt = 'Validation Results\nMin: %.4f, Max: %.4f,' \ ' Mean %.4f (%s units)' % (results['min_error'], results['max_error'], results['mean_error'], results['reporting_unit_type']) title_stim = visual.TextStim(self.win, text=title_txt, height=24, pos=(0.0, (self.win.size[1] / 2.0) * .95), @@ -511,24 +538,25 @@ def drawResultScreen(self): color_list = pl.cm.tab20b(np.linspace(0, 1, (len(results['position_results'])))) # draw eye samples ci = 0 - sample_gfx_radius = deg2pix(0.33, self.win.monitor, correctFlat=False) for position_results in results['position_results']: color = color_list[ci] * 2.0 - 1.0 + utype = 'pix' + target_x, target_y = position_results['target_position'] + + sample_gfx_radius = deg2pix(0.33, self.win.monitor, correctFlat=False) + if self.results_in_degrees: + sample_gfx_radius = 0.33 + utype='deg' sample_gfx = visual.Circle(self.win, radius=sample_gfx_radius, fillColor=color, lineColor=[1, 1, 1], - lineWidth=1, edges=64, units='pix', colorSpace='rgb', opacity=0.66, + lineWidth=1, edges=64, units=utype, colorSpace='rgb', opacity=0.66, interpolate=True, autoLog=False) if position_results['calculation_status'] == 'FAILED': position_txt = "Failed" txt_bold = True position_txt_color = "red" - target_x, target_y = position_results['position'] - text_pix_pos = toPix(self.win, target_x, target_y) - text_pix_pos = text_pix_pos[0][0], text_pix_pos[1][0] else: samples = position_results['sample_from_filter_stages']['used_samples'] - target_x = samples[:]['targ_pos_x'][0] - target_y = samples[:]['targ_pos_y'][0] if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: gaze_x = (samples[:]['left_eye_x'] + samples[:]['right_eye_x']) / 2.0 gaze_y = (samples[:]['left_eye_y'] + samples[:]['right_eye_y']) / 2.0 @@ -537,9 +565,12 @@ def drawResultScreen(self): gaze_y = samples[:]['eye_y'] for i in range(len(gaze_x)): - pix_pos = toPix(self.win, gaze_x[i], gaze_y[i]) - pix_pos = pix_pos[0][0], pix_pos[1][0] - sample_gfx.setPos(pix_pos) + if self.results_in_degrees: + g_pos = gaze_x[i], gaze_y[i] + else: + g_pos = toPix(self.win, gaze_x[i], gaze_y[i]) + g_pos = g_pos[0][0], g_pos[1][0] + sample_gfx.setPos(g_pos) sample_gfx.draw() txt_bold = False position_txt = "Gaze 
Error:\nMin: %.4f\nMax: %.4f\n" \ @@ -548,13 +579,14 @@ def drawResultScreen(self): position_results['mean_error'], position_results['stdev_error']) position_txt_color = "green" + + if self.targetsequence.display_gaze: text_pix_pos = toPix(self.win, target_x, target_y) text_pix_pos = text_pix_pos[0][0], text_pix_pos[1][0] - - target_text_stim = visual.TextStim(self.win, text=position_txt, units='pix', pos=text_pix_pos, - height=21, color=position_txt_color, antialias=True, bold=txt_bold, - anchorVert='center', anchorHoriz='center') - target_text_stim.draw() + target_text_stim = visual.TextStim(self.win, text=position_txt, units='pix', pos=text_pix_pos, + height=21, color=position_txt_color, antialias=True, + bold=txt_bold, anchorVert='center', anchorHoriz='center') + target_text_stim.draw() ci += 1 From 02e8d1cbe134b3c72667ad458413123fb3ff463f Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Fri, 2 Apr 2021 08:55:15 -0300 Subject: [PATCH 19/26] FF: misc validation class cleanup --- .../client/eyetracker/validation/procedure.py | 43 ++++++++++++++++--- 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index 0409422baf1..d9b6d4f1479 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -22,18 +22,17 @@ import os import sys from matplotlib import pyplot as pl -from collections import OrderedDict -from psychopy import visual, core +from psychopy import visual from psychopy.iohub.util import win32MessagePump, normjoin from psychopy.iohub.constants import EventConstants -from psychopy.iohub.client import ioHubConnection +from psychopy.iohub.client import ioHubConnection, Computer from psychopy.tools.monitorunittools import convertToPix from psychopy.tools.monitorunittools import pix2deg, deg2pix from psychopy.iohub.client.eyetracker.validation import PositionGrid, Trigger, KeyboardTrigger -getTime = core.getTime +getTime = Computer.getTime class TargetStim(object): @@ -326,6 +325,38 @@ def getValidationResults(self): This is also calculated as an average of both eyes when binocular data is available. The data is unsigned, providing the absolute distance from gaze to target positions + Validation Results Dict Structure + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + {'display_bounds': [-1.0, 1.0, 1.0, -1.0], + 'display_pix': array([1920, 1080]), + 'display_units': 'norm', + 'max_error': 2.3668638421479, + 'mean_error': 0.9012516727129639, + 'min_error': 0.0, + 'passed': True, + 'position_count': 9, + 'positions_failed_processing': 0, + 'reporting_unit_type': 'degree', + 'target_positions': [array([0., 0.]), array([0.85, 0.85]), array([-0.85, 0. ]), + array([0.85, 0. ]), array([ 0.85, -0.85]), array([-0.85, 0.85]), + array([-0.85, -0.85]), array([0. , 0.85]), array([ 0. , -0.85])], + 'position_results': [{'index': 0, + 'calculation_status': 'PASSED', + 'target_position': array([0., 0.]), + 'sample_time_range': [4.774341499977744, 6.8343414999777], + 'filter_samples_time_range': [6.2843414999777005, 6.6843414999777], + 'min_error': 0.0, + 'max_error': 0.7484680652684592, + 'mean_error': 0.39518431321527914, + 'stdev_error': 0.24438398690651483, + 'valid_filtered_sample_perc': 1.0, + }, + # Validation results dict is given for each target position + # .... + ] + } + :return: validation results dict. 
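Note: the module-level getTime alias now comes from iohub's Computer class instead of psychopy.core. Both should read the same shared monotonic time base, so validation timestamps stay comparable with core.getTime() values; a quick sanity check (assumes an active ioHub connection):

    from psychopy import core
    from psychopy.iohub.client import Computer

    # Expected to agree to well under a millisecond.
    print(abs(Computer.getTime() - core.getTime()))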
""" return self.validation_results @@ -403,9 +434,9 @@ def _createValidationResults(self): good_samples_in_period = [] good_sample_ratio = 0 - # Ordered dictionary of the different levels of samples selected during filtering + # Dictionary of the different levels of samples selected during filtering # for valid samples to use in accuracy calculations. - sample_msg_data_filtering = OrderedDict(all_samples=samplesforpos, # All samples from target period. + sample_msg_data_filtering = dict(all_samples=samplesforpos, # All samples from target period. # Sample during stationary period at end of target # presentation display. stationary_samples=stationary_samples, From 37009c3bf5a0581fed7ca00482115148723bec2e Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Tue, 6 Apr 2021 05:35:30 -0300 Subject: [PATCH 20/26] Updated copyright dates on iohub module --- psychopy/demos/coder/iohub/eyetracking/validation.py | 3 ++- psychopy/iohub/__init__.py | 5 ++--- psychopy/iohub/client/__init__.py | 5 ++--- psychopy/iohub/client/connect.py | 2 +- .../iohub/client/eyetracker/validation/procedure.py | 2 +- psychopy/iohub/client/keyboard.py | 2 +- psychopy/iohub/client/wintab.py | 2 +- psychopy/iohub/constants.py | 5 ++--- psychopy/iohub/datastore/__init__.py | 3 +-- psychopy/iohub/datastore/util.py | 3 +-- psychopy/iohub/devices/__init__.py | 6 ++---- psychopy/iohub/devices/computer.py | 6 ++---- psychopy/iohub/devices/deviceConfigValidation.py | 2 +- psychopy/iohub/devices/display/__init__.py | 2 +- psychopy/iohub/devices/eventfilters.py | 2 +- psychopy/iohub/devices/experiment/__init__.py | 3 +-- psychopy/iohub/devices/eyetracker/__init__.py | 4 ++-- psychopy/iohub/devices/eyetracker/eye_events.py | 12 ++++-------- psychopy/iohub/devices/eyetracker/hw/__init__.py | 3 +-- .../devices/eyetracker/hw/gazepoint/__init__.py | 5 ++--- .../devices/eyetracker/hw/gazepoint/gp3/__init__.py | 5 ++--- .../eyetracker/hw/gazepoint/gp3/eyetracker.py | 4 +--- .../iohub/devices/eyetracker/hw/mouse/__init__.py | 4 ++-- .../iohub/devices/eyetracker/hw/mouse/eyetracker.py | 2 +- .../devices/eyetracker/hw/sr_research/__init__.py | 8 ++++---- .../eyetracker/hw/sr_research/eyelink/__init__.py | 4 ++-- .../eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py | 9 +++------ .../eyetracker/hw/sr_research/eyelink/eyetracker.py | 6 +++--- .../iohub/devices/eyetracker/hw/tobii/__init__.py | 3 +-- .../iohub/devices/eyetracker/hw/tobii/eyetracker.py | 4 +--- .../eyetracker/hw/tobii/tobiiCalibrationGraphics.py | 6 +----- .../devices/eyetracker/hw/tobii/tobiiwrapper.py | 5 ++++- psychopy/iohub/devices/keyboard/__init__.py | 3 ++- psychopy/iohub/devices/keyboard/darwin.py | 2 +- psychopy/iohub/devices/keyboard/darwinkey.py | 2 +- psychopy/iohub/devices/keyboard/linux2.py | 2 +- psychopy/iohub/devices/keyboard/win32.py | 2 +- psychopy/iohub/devices/mouse/__init__.py | 2 +- psychopy/iohub/devices/mouse/darwin.py | 2 +- psychopy/iohub/devices/mouse/linux2.py | 2 +- psychopy/iohub/devices/mouse/win32.py | 2 +- psychopy/iohub/devices/serial/__init__.py | 2 +- psychopy/iohub/devices/wintab/__init__.py | 7 +------ psychopy/iohub/devices/wintab/win32.py | 2 +- psychopy/iohub/errors.py | 4 ++-- psychopy/iohub/net.py | 2 +- psychopy/iohub/server.py | 2 +- psychopy/iohub/start_iohub_process.py | 2 +- psychopy/iohub/util/__init__.py | 3 ++- psychopy/iohub/util/visualangle.py | 3 +++ 50 files changed, 79 insertions(+), 104 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py index 
110e8ca0672..74663ef11e3 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation.py @@ -41,7 +41,8 @@ target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] - # Create a validation procedure + # Create a validation procedure, iohub must already be running with an + # eye tracker device, or errors will occur. validation_proc = ValidationProcedure(win, target=target_stim, positions=target_positions, diff --git a/psychopy/iohub/__init__.py b/psychopy/iohub/__init__.py index ab5bdc8dbce..6374cec4510 100644 --- a/psychopy/iohub/__init__.py +++ b/psychopy/iohub/__init__.py @@ -1,8 +1,7 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/client/__init__.py b/psychopy/iohub/client/__init__.py index a6ac548bb0f..b2d718b102d 100644 --- a/psychopy/iohub/client/__init__.py +++ b/psychopy/iohub/client/__init__.py @@ -1,8 +1,7 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function from past.builtins import unicode diff --git a/psychopy/iohub/client/connect.py b/psychopy/iohub/client/connect.py index 61ef8fbb8c7..d02c270d49f 100644 --- a/psychopy/iohub/client/connect.py +++ b/psychopy/iohub/client/connect.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function import os diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index d9b6d4f1479..b1cf9ae97df 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -106,7 +106,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r show_results_screen=True, results_in_degrees=False, save_results_screen=False, terminate_key="escape", toggle_gaze_cursor_key="g"): """ - ValidationProcedure is used to calculate the gaze accuracy of a calibrated eye tracking system. + ValidationProcedure is used to test the gaze accuracy of a calibrated eye tracking system. Once a ValidationProcedure class instance has been created, the `.run()` method is called to actually start the validation process, which consists of the following steps: diff --git a/psychopy/iohub/client/keyboard.py b/psychopy/iohub/client/keyboard.py index d89483140d6..26f22985048 100644 --- a/psychopy/iohub/client/keyboard.py +++ b/psychopy/iohub/client/keyboard.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. 
-# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/client/wintab.py b/psychopy/iohub/client/wintab.py index d39113cf490..448653c0cb6 100644 --- a/psychopy/iohub/client/wintab.py +++ b/psychopy/iohub/client/wintab.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/constants.py b/psychopy/iohub/constants.py index f4693fb5067..ee25692ff27 100644 --- a/psychopy/iohub/constants.py +++ b/psychopy/iohub/constants.py @@ -1,8 +1,7 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import diff --git a/psychopy/iohub/datastore/__init__.py b/psychopy/iohub/datastore/__init__.py index be26f94ed50..bca0499b582 100644 --- a/psychopy/iohub/datastore/__init__.py +++ b/psychopy/iohub/datastore/__init__.py @@ -1,8 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/datastore/util.py b/psychopy/iohub/datastore/util.py index 84307761913..93ee54376a1 100644 --- a/psychopy/iohub/datastore/util.py +++ b/psychopy/iohub/datastore/util.py @@ -1,8 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- - # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/devices/__init__.py b/psychopy/iohub/devices/__init__.py index 103a06a271b..f76f4602620 100644 --- a/psychopy/iohub/devices/__init__.py +++ b/psychopy/iohub/devices/__init__.py @@ -1,8 +1,6 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/devices/computer.py b/psychopy/iohub/devices/computer.py index 836d99b3580..ee8e3e0d44b 100644 --- a/psychopy/iohub/devices/computer.py +++ b/psychopy/iohub/devices/computer.py @@ -1,8 +1,6 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
from __future__ import division, absolute_import from __future__ import print_function diff --git a/psychopy/iohub/devices/deviceConfigValidation.py b/psychopy/iohub/devices/deviceConfigValidation.py index 5bbfc48b2e0..47723700af0 100644 --- a/psychopy/iohub/devices/deviceConfigValidation.py +++ b/psychopy/iohub/devices/deviceConfigValidation.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from past.builtins import basestring import socket diff --git a/psychopy/iohub/devices/display/__init__.py b/psychopy/iohub/devices/display/__init__.py index 3272db982a1..c590d68ece1 100644 --- a/psychopy/iohub/devices/display/__init__.py +++ b/psychopy/iohub/devices/display/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). import sys diff --git a/psychopy/iohub/devices/eventfilters.py b/psychopy/iohub/devices/eventfilters.py index a9d60feea30..fd289f8722d 100644 --- a/psychopy/iohub/devices/eventfilters.py +++ b/psychopy/iohub/devices/eventfilters.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import from __future__ import print_function diff --git a/psychopy/iohub/devices/experiment/__init__.py b/psychopy/iohub/devices/experiment/__init__.py index 88fc0cca0dc..19dd5ef22ec 100644 --- a/psychopy/iohub/devices/experiment/__init__.py +++ b/psychopy/iohub/devices/experiment/__init__.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). - from past.types import basestring import numpy as N from .. import Device, DeviceEvent, Computer, Device diff --git a/psychopy/iohub/devices/eyetracker/__init__.py b/psychopy/iohub/devices/eyetracker/__init__.py index 1c9b8646536..be2da8b2e20 100644 --- a/psychopy/iohub/devices/eyetracker/__init__.py +++ b/psychopy/iohub/devices/eyetracker/__init__.py @@ -1,6 +1,6 @@ -"""ioHub Common Eye Tracker Interface""" +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from .. import Device, ioDeviceError from ...constants import DeviceConstants, EyeTrackerConstants diff --git a/psychopy/iohub/devices/eyetracker/eye_events.py b/psychopy/iohub/devices/eyetracker/eye_events.py index 356bb8a58d6..c53e848d240 100644 --- a/psychopy/iohub/devices/eyetracker/eye_events.py +++ b/psychopy/iohub/devices/eyetracker/eye_events.py @@ -1,16 +1,12 @@ -"""ioHub Common Eye Tracker Interface""" - # Part of the psychopy.iohub library. - # Copyright (C) 2012-2016 iSolver Software Solutions - # Distributed under the terms of the GNU General Public License (GPL). - +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. 
+# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). from .. import DeviceEvent from ...constants import EventConstants from . import EyeTrackerDevice import numpy as np -##################### Eye Tracker Sample Stream Types #################### -# - class EyeTrackerEvent(DeviceEvent): PARENT_DEVICE = EyeTrackerDevice diff --git a/psychopy/iohub/devices/eyetracker/hw/__init__.py b/psychopy/iohub/devices/eyetracker/hw/__init__.py index 9b15f906ff8..41998c05094 100644 --- a/psychopy/iohub/devices/eyetracker/hw/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/__init__.py @@ -1,5 +1,4 @@ -"""ioHub Common Eye Tracker Interface""" # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). \ No newline at end of file diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/__init__.py b/psychopy/iohub/devices/eyetracker/hw/gazepoint/__init__.py index 4f67d36a10c..c49f4ec8b7d 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/__init__.py @@ -1,7 +1,6 @@ -"""ioHub Common Eye Tracker Interface""" - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import absolute_import diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/__init__.py b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/__init__.py index 0ef2c3bbf04..c7393e1dd1a 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/__init__.py @@ -1,7 +1,6 @@ -"""ioHub Common Eye Tracker Interface""" - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import absolute_import diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py index dca5795bcd9..d08e1bfac92 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py @@ -1,9 +1,7 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). -# .. fileauthor:: Martin Guest -# .. fileauthor:: Sol Simpson from __future__ import division from ......errors import print2err, printExceptionDetailsToStdErr from ......constants import EventConstants, EyeTrackerConstants diff --git a/psychopy/iohub/devices/eyetracker/hw/mouse/__init__.py b/psychopy/iohub/devices/eyetracker/hw/mouse/__init__.py index 63468191ae5..8b86d234bf8 100644 --- a/psychopy/iohub/devices/eyetracker/hw/mouse/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/mouse/__init__.py @@ -1,6 +1,6 @@ -"""ioHub Common Eye Tracker Interface""" +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. 
-# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from .eyetracker import EyeTracker from psychopy.iohub.devices.eyetracker import (MonocularEyeSampleEvent, FixationStartEvent, FixationEndEvent, diff --git a/psychopy/iohub/devices/eyetracker/hw/mouse/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/mouse/eyetracker.py index 11d587d4a93..3bd3bc8487a 100644 --- a/psychopy/iohub/devices/eyetracker/hw/mouse/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/mouse/eyetracker.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from psychopy.iohub.errors import print2err, printExceptionDetailsToStdErr from psychopy.iohub.constants import EyeTrackerConstants, EventConstants diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/__init__.py b/psychopy/iohub/devices/eyetracker/hw/sr_research/__init__.py index 1f46e00f341..41998c05094 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/__init__.py @@ -1,4 +1,4 @@ -"""ioHub Common Eye Tracker Interface for EyeLink(C) Systems""" - # Part of the psychopy.iohub library. - # Copyright (C) 2012-2016 iSolver Software Solutions - # Distributed under the terms of the GNU General Public License (GPL). \ No newline at end of file +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). \ No newline at end of file diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/__init__.py b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/__init__.py index 41c98d4d3b0..50dde39f6ad 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/__init__.py @@ -1,6 +1,6 @@ -"""ioHub Common Eye Tracker Interface for EyeLink(C) Systems""" +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from .eyetracker import (EyeTracker, MonocularEyeSampleEvent, BinocularEyeSampleEvent, FixationStartEvent, diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py index 3edb97e2fd9..b0515427442 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py @@ -1,9 +1,6 @@ -""" -ioHub Common Eye Tracker Interface for EyeLink(C) Systems. -EyeLink(C) calibration graphics implemented using PsychoPy. -""" -# Part of the PsychoPy.iohub library -# Copyright (C) 2012-2016 iSolver Software Solutions +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
import numpy as np from PIL import Image, ImageOps diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py index a4fb8e30446..4abb47bdbdc 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py @@ -1,6 +1,6 @@ -"""ioHub Common Eye Tracker Interface for EyeLink(C) Systems""" -# Part of the PsychoPy.iohub library -# Copyright (C) 2012-2016 iSolver Software Solutions +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). import os import numpy as np diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/__init__.py b/psychopy/iohub/devices/eyetracker/hw/tobii/__init__.py index c4e85f525eb..4e7fa4d5ca3 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/__init__.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). -"""ioHub Common Eye Tracker Interface for Tobii (C) Eye Tracking System""" from __future__ import absolute_import diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py index 5fd334b5d70..b67ee10bedc 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py @@ -1,9 +1,7 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). - -"""ioHub Common Eye Tracker Interface for Tobii (C) Eye Tracking System""" from __future__ import absolute_import import math from .....constants import EventConstants, EyeTrackerConstants diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py b/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py index 25fc05f8a27..66ca4e40c45 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py @@ -1,10 +1,6 @@ -""" -ioHub Common Eye Tracker Interface for Tobii (C) Eye Tracking System. -Calibration graphics implemented using PsychoPy. -""" # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). import psychopy diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiwrapper.py b/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiwrapper.py index 35cff850298..4c1c4ad5bc5 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiwrapper.py +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiwrapper.py @@ -1,4 +1,7 @@ -"""ioHub Common Eye Tracker Interface for Tobii (C) Eye Tracking System.""" +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). 
from __future__ import print_function # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. diff --git a/psychopy/iohub/devices/keyboard/__init__.py b/psychopy/iohub/devices/keyboard/__init__.py index d37083aba1a..776a133b2a6 100644 --- a/psychopy/iohub/devices/keyboard/__init__.py +++ b/psychopy/iohub/devices/keyboard/__init__.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). global Keyboard diff --git a/psychopy/iohub/devices/keyboard/darwin.py b/psychopy/iohub/devices/keyboard/darwin.py index 36e45a96a3f..a927b94a971 100644 --- a/psychopy/iohub/devices/keyboard/darwin.py +++ b/psychopy/iohub/devices/keyboard/darwin.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from copy import copy import Quartz as Qz diff --git a/psychopy/iohub/devices/keyboard/darwinkey.py b/psychopy/iohub/devices/keyboard/darwinkey.py index eab186c0762..3b68b2099e2 100644 --- a/psychopy/iohub/devices/keyboard/darwinkey.py +++ b/psychopy/iohub/devices/keyboard/darwinkey.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). # /System/Library/Frameworks/Carbon.framework/Versions/A/Frameworks/ diff --git a/psychopy/iohub/devices/keyboard/linux2.py b/psychopy/iohub/devices/keyboard/linux2.py index f04ad080c04..17d883ab248 100644 --- a/psychopy/iohub/devices/keyboard/linux2.py +++ b/psychopy/iohub/devices/keyboard/linux2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from . import ioHubKeyboardDevice diff --git a/psychopy/iohub/devices/keyboard/win32.py b/psychopy/iohub/devices/keyboard/win32.py index a95f2d041df..e1306574d70 100644 --- a/psychopy/iohub/devices/keyboard/win32.py +++ b/psychopy/iohub/devices/keyboard/win32.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). try: diff --git a/psychopy/iohub/devices/mouse/__init__.py b/psychopy/iohub/devices/mouse/__init__.py index bfbcf7d2b90..0c8be0f4fff 100644 --- a/psychopy/iohub/devices/mouse/__init__.py +++ b/psychopy/iohub/devices/mouse/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
from __future__ import division, print_function, absolute_import diff --git a/psychopy/iohub/devices/mouse/darwin.py b/psychopy/iohub/devices/mouse/darwin.py index 2028a6d2047..59dedf37af6 100644 --- a/psychopy/iohub/devices/mouse/darwin.py +++ b/psychopy/iohub/devices/mouse/darwin.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, print_function, absolute_import diff --git a/psychopy/iohub/devices/mouse/linux2.py b/psychopy/iohub/devices/mouse/linux2.py index 9584939e807..f4adca7cd80 100644 --- a/psychopy/iohub/devices/mouse/linux2.py +++ b/psychopy/iohub/devices/mouse/linux2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, print_function, absolute_import diff --git a/psychopy/iohub/devices/mouse/win32.py b/psychopy/iohub/devices/mouse/win32.py index 79ec13cf436..0ee0eb07b41 100644 --- a/psychopy/iohub/devices/mouse/win32.py +++ b/psychopy/iohub/devices/mouse/win32.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, print_function, absolute_import diff --git a/psychopy/iohub/devices/serial/__init__.py b/psychopy/iohub/devices/serial/__init__.py index 3550fd8202d..b5fb7b7a405 100644 --- a/psychopy/iohub/devices/serial/__init__.py +++ b/psychopy/iohub/devices/serial/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). import serial import sys diff --git a/psychopy/iohub/devices/wintab/__init__.py b/psychopy/iohub/devices/wintab/__init__.py index d5df757f1eb..9e5283635bd 100644 --- a/psychopy/iohub/devices/wintab/__init__.py +++ b/psychopy/iohub/devices/wintab/__init__.py @@ -1,13 +1,8 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). -# -# TODO List -# -# 2) Check for missing serial numbers in PACKET evt stream. -# _is_epydoc = False # Pen digitizers /tablets that support Wintab API diff --git a/psychopy/iohub/devices/wintab/win32.py b/psychopy/iohub/devices/wintab/win32.py index 2d9e1ccf524..dde8087f317 100644 --- a/psychopy/iohub/devices/wintab/win32.py +++ b/psychopy/iohub/devices/wintab/win32.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
# Initial file based on pyglet.libs.win32 diff --git a/psychopy/iohub/errors.py b/psychopy/iohub/errors.py index e62c44ee251..a9a339ff9a1 100644 --- a/psychopy/iohub/errors.py +++ b/psychopy/iohub/errors.py @@ -1,6 +1,6 @@ -# coding=utf-8 +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/net.py b/psychopy/iohub/net.py index 6080e3459ce..47af4b12ac0 100644 --- a/psychopy/iohub/net.py +++ b/psychopy/iohub/net.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import diff --git a/psychopy/iohub/server.py b/psychopy/iohub/server.py index 9b3bd12ac55..59580e89159 100644 --- a/psychopy/iohub/server.py +++ b/psychopy/iohub/server.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import diff --git a/psychopy/iohub/start_iohub_process.py b/psychopy/iohub/start_iohub_process.py index a6c4a091fc1..ead929f3184 100644 --- a/psychopy/iohub/start_iohub_process.py +++ b/psychopy/iohub/start_iohub_process.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import import json diff --git a/psychopy/iohub/util/__init__.py b/psychopy/iohub/util/__init__.py index 8d938e74428..4d2a206bd22 100644 --- a/psychopy/iohub/util/__init__.py +++ b/psychopy/iohub/util/__init__.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division diff --git a/psychopy/iohub/util/visualangle.py b/psychopy/iohub/util/visualangle.py index d728c951af4..6567763a0bc 100644 --- a/psychopy/iohub/util/visualangle.py +++ b/psychopy/iohub/util/visualangle.py @@ -1,4 +1,7 @@ # -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). 
from __future__ import division from builtins import object """ From d469aaf378002d812acfd20d0a35f5285526f53c Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Tue, 6 Apr 2021 09:54:59 -0300 Subject: [PATCH 21/26] BF: builder integration util function --- psychopy/iohub/util/__init__.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/psychopy/iohub/util/__init__.py b/psychopy/iohub/util/__init__.py index 4d2a206bd22..2b41abdb45d 100644 --- a/psychopy/iohub/util/__init__.py +++ b/psychopy/iohub/util/__init__.py @@ -145,7 +145,6 @@ def getDevicePaths(device_name=""): iohub_device_path = module_directory(import_device) if device_name: iohub_device_path = os.path.join(iohub_device_path, device_name.replace('.', os.path.sep)) - scs_yaml_paths = [] for root, dirs, files in os.walk(iohub_device_path): device_folder = None @@ -183,6 +182,8 @@ def getDeviceDefaultConfig(device_name, builder_hides=True): 'save_events': True, 'stream_events': True} """ + if device_name.endswith(".EyeTracker"): + device_name = device_name[:-11] device_paths = getDevicePaths(device_name) device_configs = [] for dpath, dconf in device_paths: @@ -227,13 +228,14 @@ def getDeviceNames(device_name="eyetracker.hw"): print(eyetrackers) Output: - ['eyetracker.hw.gazepoint.gp3', 'eyetracker.hw.sr_research.eyelink', 'eyetracker.hw.tobii'] + ['eyetracker.hw.gazepoint.gp3.EyeTracker', 'eyetracker.hw.sr_research.eyelink.EyeTracker', + 'eyetracker.hw.tobii.EyeTracker'] """ names = [] dconfigs = getDeviceDefaultConfig(device_name) for dcfg in dconfigs: d_name = tuple(dcfg.keys())[0] - d_name = d_name[:d_name.rfind('.')] + #d_name = d_name[:d_name.rfind('.')] names.append(d_name) return names @@ -245,6 +247,8 @@ def getDeviceFile(device_name, file_name): :param: file_name: name of device yaml file to load :return: dict """ + if device_name.endswith(".EyeTracker"): + device_name = device_name[:-11] device_paths = getDevicePaths(device_name) device_sconfigs = [] for dpath, _ in device_paths: From 3fa9a177867de5fcfa16f10700fbb7fa193ab8ad Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Tue, 6 Apr 2021 10:17:16 -0300 Subject: [PATCH 22/26] FF: eye tracker config 'name' needs to be exposed to Builder But is static / can not be changed by user. --- .../eyetracker/hw/gazepoint/gp3/default_eyetracker.yaml | 2 +- .../hw/gazepoint/gp3/supported_config_settings.yaml | 8 ++------ .../devices/eyetracker/hw/mouse/default_eyetracker.yaml | 2 +- .../eyetracker/hw/mouse/supported_config_settings.yaml | 2 +- .../hw/sr_research/eyelink/default_eyetracker.yaml | 2 +- .../hw/sr_research/eyelink/supported_config_settings.yaml | 8 ++------ .../devices/eyetracker/hw/tobii/default_eyetracker.yaml | 2 +- .../eyetracker/hw/tobii/supported_config_settings.yaml | 8 ++------ 8 files changed, 11 insertions(+), 23 deletions(-) diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/default_eyetracker.yaml b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/default_eyetracker.yaml index 5ccac79c93d..ae3331a5cf7 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/default_eyetracker.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/default_eyetracker.yaml @@ -66,5 +66,5 @@ eyetracker.hw.gazepoint.gp3.EyeTracker: # device configuration, or you can complete any ones that are relevant for FYI # purposes only at this time. 
device_number: 0 - builder_hides: [ 'builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events', 'runtime_settings.track_eyes', 'calibration.use_builtin' ] + builder_hides: [ 'builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events', 'runtime_settings.track_eyes', 'calibration.use_builtin' ] diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/supported_config_settings.yaml b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/supported_config_settings.yaml index fd602ba7833..8cd7662fa72 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/supported_config_settings.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/supported_config_settings.yaml @@ -1,9 +1,5 @@ eyetracker.hw.gazepoint.gp3.EyeTracker: - name: - IOHUB_STRING: - min_length: 1 - max_length: 32 - first_char_alpha: True + name: tracker enable: IOHUB_BOOL model_name: IOHUB_LIST: @@ -52,4 +48,4 @@ eyetracker.hw.gazepoint.gp3.EyeTracker: max: 2.5 device_number: 0 manufacturer_name: GazePoint - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events', 'runtime_settings.track_eyes', 'calibration.use_builtin'] + builder_hides: ['builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events', 'runtime_settings.track_eyes', 'calibration.use_builtin'] diff --git a/psychopy/iohub/devices/eyetracker/hw/mouse/default_eyetracker.yaml b/psychopy/iohub/devices/eyetracker/hw/mouse/default_eyetracker.yaml index c8e9f823d6b..3cff6d62e51 100644 --- a/psychopy/iohub/devices/eyetracker/hw/mouse/default_eyetracker.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/mouse/default_eyetracker.yaml @@ -18,5 +18,5 @@ eyetracker.hw.mouse.EyeTracker: manufacturer_name: MouseGaze auto_report_events: False device_number: 0 - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events'] + builder_hides: ['builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events'] diff --git a/psychopy/iohub/devices/eyetracker/hw/mouse/supported_config_settings.yaml b/psychopy/iohub/devices/eyetracker/hw/mouse/supported_config_settings.yaml index ba98370d2ce..1e2d41836af 100644 --- a/psychopy/iohub/devices/eyetracker/hw/mouse/supported_config_settings.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/mouse/supported_config_settings.yaml @@ -43,4 +43,4 @@ eyetracker.hw.mouse.EyeTracker: track_eyes: RIGHT_EYE device_number: 0 manufacturer_name: MouseGaze - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events'] + builder_hides: ['builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events'] diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/default_eyetracker.yaml b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/default_eyetracker.yaml index b3c6381648c..0f5c8f9ac7e 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/default_eyetracker.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/default_eyetracker.yaml @@ -219,4 +219,4 @@ eyetracker.hw.sr_research.eyelink.EyeTracker: # device_number is not used by this device type. 
# device_number: 0 - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events'] \ No newline at end of file + builder_hides: ['builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events'] \ No newline at end of file diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/supported_config_settings.yaml b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/supported_config_settings.yaml index d4850951c18..31a3c4149b5 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/supported_config_settings.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/supported_config_settings.yaml @@ -1,9 +1,5 @@ eyetracker.hw.sr_research.eyelink.EyeTracker: - name: - IOHUB_STRING: - min_length: 1 - max_length: 32 - first_char_alpha: True + name: tracker enable: IOHUB_BOOL save_events: IOHUB_BOOL stream_events: IOHUB_BOOL @@ -105,6 +101,6 @@ eyetracker.hw.sr_research.eyelink.EyeTracker: # manufacturer_name: Used by Builder as the displayed name in the eye tracker selection dropdown. # manufacturer_name: SR Research Ltd - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events'] + builder_hides: ['builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events'] diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml b/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml index 24d247bb043..dffbfc07d2e 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml @@ -161,4 +161,4 @@ eyetracker.hw.tobii.EyeTracker: device_number: 0 - builder_hides: ['builder_hides', 'enable', 'name', 'calibration.target_positions', 'device_number', 'auto_report_events'] \ No newline at end of file + builder_hides: ['builder_hides', 'enable', 'calibration.target_positions', 'device_number', 'auto_report_events'] \ No newline at end of file diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml b/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml index c1611893d12..aa3bf538923 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml @@ -1,9 +1,5 @@ eyetracker.hw.tobii.EyeTracker: - name: - IOHUB_STRING: - min_length: 1 - max_length: 32 - first_char_alpha: True + name: tracker enable: IOHUB_BOOL model_name: IOHUB_STRING: @@ -83,4 +79,4 @@ eyetracker.hw.tobii.EyeTracker: contract_only: IOHUB_BOOL device_number: 0 manufacturer_name: Tobii Technology - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'calibration.target_positions', 'auto_report_events'] \ No newline at end of file + builder_hides: ['builder_hides', 'enable', 'device_number', 'calibration.target_positions', 'auto_report_events'] \ No newline at end of file From af11e3425f1355fe0ac7e9ee953bb71c23594d9f Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Wed, 7 Apr 2021 08:55:40 -0300 Subject: [PATCH 23/26] FF: Added PositionGrid example code to validation demo. 
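For reference, a minimal sketch of the PositionGrid alternative this patch documents in the demo
(the import and argument values are copied from the commented example added below; it assumes a
window using 'norm' units, as in the demo):

    from psychopy.iohub.client.eyetracker.validation import PositionGrid
    # 3x3 grid of validation target positions within the given bounds,
    # starting from position index 4 (the center of a 3x3 grid) and
    # repeating the first position at the end of the sequence.
    target_positions = PositionGrid(bounds=[-.85, .85, .85, -.85], shape=(3, 3),
                                    firstposindex=4, repeatFirstPos=True)
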
--- psychopy/demos/coder/iohub/eyetracking/validation.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py index 74663ef11e3..b72a36702c4 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation.py @@ -7,7 +7,7 @@ import time from psychopy import visual from psychopy.iohub import launchHubServer -from psychopy.iohub.client.eyetracker.validation import TargetStim, ValidationProcedure +from psychopy.iohub.client.eyetracker.validation import TargetStim, ValidationProcedure, PositionGrid if __name__ == "__main__": # Create a default PsychoPy Window @@ -37,16 +37,17 @@ dotcolor=[1, -1, -1], dotradius=0.005, units='norm', colorspace='rgb') # target_positions: Provide your own list of validation positions, - # or use the PositionGrid class to generate a set. target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] + # or use the PositionGrid class to generate a set. + #target_positions = PositionGrid(bounds=[-.85, .85, .85, -.85], shape=(3, 3), firstposindex=4, repeatFirstPos=True) # Create a validation procedure, iohub must already be running with an # eye tracker device, or errors will occur. validation_proc = ValidationProcedure(win, target=target_stim, positions=target_positions, - randomize_positions=False, + randomize_positions=True, target_animation=dict(velocity=1.0, expandedscale=3.0, expansionduration=0.2, From e78bb971a63b5c4ada19bd8e0e0c27febc1cd0d8 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Wed, 7 Apr 2021 10:47:58 -0300 Subject: [PATCH 24/26] FF: iohub eye tracker updates related to calibration return values FF: eyelink returning dict from runSetupProcedure(), not sure how useful it will be yet. 
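To illustrate, a sketch of how an experiment script might consume the new return value (key names
are taken from the eyelink change below; the example values come from the in-code notes and are
tracker-dependent, so treat this as an assumption rather than a stable API):

    # tracker = io.getDevice('tracker')
    reply = tracker.runSetupProcedure()
    # EyeLink now returns e.g. {'message': 'calibration_result: 0', 'result': 1000}
    if isinstance(reply, dict):
        print('calibration message:', reply.get('message'))
        print('calibration result:', reply.get('result'))
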
FF: cleanup iohub gazepoint calibration results dict --- .../eyetracker/hw/gazepoint/gp3/eyetracker.py | 19 +++--- .../eyeLinkCoreGraphicsIOHubPsychopy.py | 2 + .../hw/sr_research/eyelink/eyetracker.py | 58 ++++++++++++------- .../devices/eyetracker/hw/tobii/eyetracker.py | 6 +- .../hw/tobii/tobiiCalibrationGraphics.py | 25 ++++---- 5 files changed, 64 insertions(+), 46 deletions(-) diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py index d08e1bfac92..ea821e598d1 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py @@ -438,17 +438,20 @@ def runSetupProcedure(self): self._waitForAck('CALIBRATE_DELAY', timeout=2.0) self._gp3set('CALIBRATE_SHOW', STATE=1) - self._gp3set('CALIBRATE_START', STATE=1) - cal_result = self._waitForAck('CALIB_RESULT', timeout=30.0) + self._gp3set('CALIBRATE_START', STATE=1) - if cal_result: - #print2err("GP3 calibration done.") - #print2err("Closing GP3 calibration window....") + cal_result = self._waitForAck('CALIB_RESULT', timeout=30.0) + if cal_result: self._gp3set('CALIBRATE_SHOW', STATE=0) self._gp3get('CALIBRATE_RESULT_SUMMARY') - - cal_result['SUMMARY']=self._waitForAck('CALIBRATE_RESULT_SUMMARY') - #print2err("CAL_RESULT: ",cal_result) + del cal_result['type'] + del cal_result['ID'] + + cal_summary = self._waitForAck('CALIBRATE_RESULT_SUMMARY') + del cal_summary['type'] + del cal_summary['ID'] + cal_result['SUMMARY'] = cal_summary + return cal_result def _poll(self): diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py index b0515427442..4200c380e08 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py @@ -503,6 +503,8 @@ def _handleEvent(self, event): self.state = 'validation' elif char == 'a': pylink_key = ord(char) + elif char == 'o': + pylink_key = ord(char) elif char == 'pageup': pylink_key = pylink.PAGE_UP elif char == 'pagedown': diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py index 4abb47bdbdc..647b2eba88a 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py @@ -400,9 +400,6 @@ def runSetupProcedure(self): * ESC can be pressed at any time to exit the current state of the setup procedure and return to the initial blank screen state. * O = Exit the runSetupProcedure method and continue with the experiment. """ -# if starting_state != EyeTrackerConstants.DEFAULT_SETUP_PROCEDURE: -# printExceptionDetailsToStdErr() - try: from . 
import eyeLinkCoreGraphicsIOHubPsychopy EyeLinkCoreGraphicsIOHubPsychopy = eyeLinkCoreGraphicsIOHubPsychopy.EyeLinkCoreGraphicsIOHubPsychopy @@ -415,8 +412,7 @@ def runSetupProcedure(self): targetOuterDiameter = circle_attributes.get('outer_diameter') targetInnerDiameter = circle_attributes.get('inner_diameter') - genv = EyeLinkCoreGraphicsIOHubPsychopy( - self, + genv = EyeLinkCoreGraphicsIOHubPsychopy(self, targetForegroundColor=targetForegroundColor, targetBackgroundColor=targetBackgroundColor, screenColor=screenColor, @@ -424,13 +420,38 @@ def runSetupProcedure(self): targetInnerDiameter=targetInnerDiameter) pylink.openGraphicsEx(genv) + self._eyelink.doTrackerSetup() + + m = self._eyelink.getCalibrationMessage() + r = self._eyelink.getCalibrationResult() + + # from pylink docs, getCalibrationResult should return: + # + # NO_REPLY if calibration not completed yet. + # OK_RESULT(0) if success. + # ABORT_REPLY(27) if 'ESC' key aborted calibration. + # -1 if calibration failed. + # 1 if poor calibration or excessive validation error. + # + # but it returns 1000. ?? + # + # getCalibrationResult returns "calibration_result: 0", where + # 0 == OK_RESULT == successful calibration. + # TODO: Test if eyelink returns different calibration_result if calibration fails. + reply = dict(message=m, result=r) + # reply is returning: + # {'message': 'calibration_result: 0', 'result': 1000} + # on a successful calibration. + # TODO: Parse into more meaningful message if possible. + genv._unregisterEventMonitors() genv.clearAllEventBuffers() genv.window.close() del genv.window del genv - return EyeTrackerConstants.EYETRACKER_OK + + return reply except Exception as e: printExceptionDetailsToStdErr() @@ -1455,8 +1476,7 @@ def _addCommandFunctions(self): self._COMMAND_TO_FUNCTION['doDriftCorrect'] = _doDriftCorrect self._COMMAND_TO_FUNCTION['eyeAvailable'] = _eyeAvailable self._COMMAND_TO_FUNCTION['enableDummyOpen'] = _dummyOpen - self._COMMAND_TO_FUNCTION[ - 'getLastCalibrationInfo'] = _getCalibrationMessage + self._COMMAND_TO_FUNCTION['getLastCalibrationInfo'] = _getCalibrationMessage self._COMMAND_TO_FUNCTION['applyDriftCorrect'] = _applyDriftCorrect self._COMMAND_TO_FUNCTION['setIPAddress'] = _setIPAddress self._COMMAND_TO_FUNCTION['setLockEye'] = _setLockEye @@ -1580,7 +1600,7 @@ def _getSamplingRate(self): def _getTrackerMode(*args, **kwargs): try: - r = pylink.getEyeLink().getTrackerMode() + r = pylink.getEYELINK().getTrackerMode() return _EYELINK_HOST_MODES[r] except Exception as e: printExceptionDetailsToStdErr() @@ -1590,7 +1610,7 @@ def _doDriftCorrect(*args, **kwargs): try: if len(args) == 4: x, y, draw, allow_setup = args - r = pylink.getEyeLink().doDriftCorrect(x, y, draw, allow_setup) + r = pylink.getEYELINK().doDriftCorrect(x, y, draw, allow_setup) return r else: print2err('doDriftCorrect requires 4 parameters, received: ', args) @@ -1601,7 +1621,7 @@ def _doDriftCorrect(*args, **kwargs): def _applyDriftCorrect(): try: - r = pylink.getEyeLink().applyDriftCorrect() + r = pylink.getEYELINK().applyDriftCorrect() if r == 0: return True else: @@ -1612,7 +1632,7 @@ def _applyDriftCorrect(): def _eyeAvailable(*args, **kwargs): try: - r = pylink.getEyeLink().eyeAvailable() + r = pylink.getEYELINK().eyeAvailable() if r == 0: return EyeTrackerConstants.getName(EyeTrackerConstants.LEFT_EYE) elif r == 1: @@ -1627,7 +1647,7 @@ def _eyeAvailable(*args, **kwargs): def _dummyOpen(*args, **kwargs): try: - r = pylink.getEyeLink().dummy_open() + r = pylink.getEYELINK().dummy_open() return r except Exception as 
e: printExceptionDetailsToStdErr() @@ -1635,15 +1655,13 @@ def _dummyOpen(*args, **kwargs): def _getCalibrationMessage(*args, **kwargs): try: - m = pylink.getEyeLink().getCalibrationMessage() - r = pylink.getEyeLink().getCalibrationResult() + m = pylink.getEYELINK().getCalibrationMessage() + r = pylink.getEYELINK().getCalibrationResult() if r in _eyeLinkCalibrationResultDict: r = _eyeLinkCalibrationResultDict[r] else: r = 'NO_REPLY' - rString = 'Last Calibration Message:\n{0}\n\nLastCalibrationResult:\n{1}'.format( - m, r) - return rString + return dict(message=m, result=r) except Exception as e: printExceptionDetailsToStdErr() @@ -1652,7 +1670,7 @@ def _setIPAddress(*args, **kwargs): try: if len(args) == 1: ipString = args[0] - r = pylink.getEyeLink().setAddress(ipString) + r = pylink.getEYELINK().setAddress(ipString) if r == 0: return True return [ @@ -1667,7 +1685,7 @@ def _setLockEye(*args, **kwargs): try: if len(args) == 1: enable = args[0] - r = pylink.getEyeLink().sendCommand('lock_eye_after_calibration %d' % (enable)) + r = pylink.getEYELINK().sendCommand('lock_eye_after_calibration %d' % (enable)) return r return ['EYE_TRACKER_ERROR', 'setLockEye', 'One argument is required, bool type.'] diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py index b67ee10bedc..7b678f423fa 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py @@ -185,11 +185,9 @@ def runSetupProcedure(self): from .tobiiCalibrationGraphics import TobiiPsychopyCalibrationGraphics calibration_properties = self.getConfiguration().get('calibration') - screenColor = calibration_properties.get( - 'screen_background_color') # [r,g,b] of screen + screenColor = calibration_properties.get('screen_background_color') # [r,g,b] of screen - genv = TobiiPsychopyCalibrationGraphics( - self, screenColor=screenColor) + genv = TobiiPsychopyCalibrationGraphics(self, screenColor=screenColor) calibrationOK = genv.runCalibration() diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py b/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py index 66ca4e40c45..4e84df20279 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py @@ -228,8 +228,8 @@ def _createStim(self): self.marker_heights = (-sh / 2.0 * .7, -sh / 2.0 * .75, -sh / 2.0 * .8, -sh / 2.0 * .7, -sh / 2.0 * .75, -sh / 2.0 * .8) - bar_vertices = [-hbox_bar_length / 2, -hbox_bar_height / 2], [hbox_bar_length / 2, -hbox_bar_height / - 2], [hbox_bar_length / 2, hbox_bar_height / 2], [-hbox_bar_length / 2, hbox_bar_height / 2] + bar_vertices = ([-hbox_bar_length / 2, -hbox_bar_height / 2], [hbox_bar_length / 2, -hbox_bar_height / 2], + [hbox_bar_length / 2, hbox_bar_height / 2], [-hbox_bar_length / 2, hbox_bar_height / 2]) self.feedback_resources = OrderedDict() @@ -338,12 +338,9 @@ def runCalibration(self): if not continue_calibration: return False - auto_pace = self._eyetrackerinterface.getConfiguration()['calibration'][ - 'auto_pace'] - pacing_speed = self._eyetrackerinterface.getConfiguration()['calibration'][ - 'pacing_speed'] - randomize_points = self._eyetrackerinterface.getConfiguration()['calibration'][ - 'randomize'] + auto_pace = self._eyetrackerinterface.getConfiguration()['calibration']['auto_pace'] + pacing_speed = 
self._eyetrackerinterface.getConfiguration()['calibration']['pacing_speed'] + randomize_points = self._eyetrackerinterface.getConfiguration()['calibration']['randomize'] cal_target_list = self.CALIBRATION_POINT_LIST[1:-1] if randomize_points is True: @@ -410,8 +407,9 @@ def waitingForNextTargetTime(): self.clearCalibrationWindow() self.clearAllEventBuffers() + calibration_result = None if _quit: - return False + return calibration_result self._lastCalibrationOK = False if calibration: @@ -422,20 +420,19 @@ def waitingForNextTargetTime(): self._lastCalibrationOK = False calibration.leave_calibration_mode() calibration = None - - - + if self._lastCalibrationOK is False: instuction_text = 'Calibration Failed. Options: SPACE: Re-run Calibration; ESCAPE: Exit Setup' continue_method = self.showSystemSetupMessageScreen( instuction_text, True, msg_types=['SPACE_KEY_ACTION', 'QUIT']) if continue_method is False: return self.runCalibration() - return False + return calibration_result instuction_text = "Calibration Passed. PRESS 'SPACE' KEY TO CONTINUE." self.showSystemSetupMessageScreen(instuction_text, True, msg_types=['SPACE_KEY_ACTION']) - return True + + return calibration_result def clearCalibrationWindow(self): self.window.flip(clearBuffer=True) From 2f9d8eb27fc4e53f28a86bb4b6dcc631c11af53a Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Wed, 7 Apr 2021 13:42:06 -0300 Subject: [PATCH 25/26] FF: added read-only .results property to validation class. --- .../coder/iohub/eyetracking/validation.py | 17 ++++++------ .../client/eyetracker/validation/procedure.py | 26 ++++++++++++------- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py index b72a36702c4..0788fb98170 100644 --- a/psychopy/demos/coder/iohub/eyetracking/validation.py +++ b/psychopy/demos/coder/iohub/eyetracking/validation.py @@ -63,15 +63,16 @@ terminate_key='escape') # Run the validation procedure. run() does not return until the validation is complete. - validation_results = validation_proc.run() - if validation_results: + validation_proc.run() + if validation_proc.results: + results = validation_proc.results print("++++ Validation Results ++++") - print("Passed:", validation_results['passed']) - print("failed_pos_count:", validation_results['positions_failed_processing']) - print("Units:", validation_results['reporting_unit_type']) - print("min_error:", validation_results['min_error']) - print("max_error:", validation_results['max_error']) - print("mean_error:", validation_results['mean_error']) + print("Passed:", results['passed']) + print("failed_pos_count:", results['positions_failed_processing']) + print("Units:", results['reporting_unit_type']) + print("min_error:", results['min_error']) + print("max_error:", results['max_error']) + print("mean_error:", results['mean_error']) else: print("Validation Aborted by User.") io.quit() diff --git a/psychopy/iohub/client/eyetracker/validation/procedure.py b/psychopy/iohub/client/eyetracker/validation/procedure.py index b1cf9ae97df..40b0f6fd8be 100644 --- a/psychopy/iohub/client/eyetracker/validation/procedure.py +++ b/psychopy/iohub/client/eyetracker/validation/procedure.py @@ -159,7 +159,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r save_results_screen=True) # Run the validation procedure. Method does not return until the validation is complete. 
- validation_results = validation_proc.run() + _validation_results = validation_proc.run() See the validation.py demo in demos.coder.iohub.eyetracking for a more complete example. @@ -206,7 +206,7 @@ def __init__(self, win=None, target=None, positions=None, target_animation={}, r self.show_results_screen = show_results_screen self.results_in_degrees = results_in_degrees self.save_results_screen = save_results_screen - self.validation_results = None + self._validation_results = None if storeeventsfor is None: storeeventsfor = [self.io.devices.keyboard, self.io.devices.mouse, @@ -240,14 +240,14 @@ def run(self): self.showIntroScreen() if self.terminate_key and self.terminate_key in keyboard.waitForReleases(keys=[' ', self.terminate_key]): print("Escape key pressed. Exiting validation") - self.validation_results = None + self._validation_results = None return # Perform Validation..... terminate = not self.targetsequence.display(**self.animation_params) if terminate: print("Escape key pressed. Exiting validation") - self.validation_results = None + self._validation_results = None return self.io.clearEvents('all') @@ -268,7 +268,7 @@ def run(self): self.terminate_key, self.targetsequence.gaze_cursor_key]) - return self.validation_results + return self._validation_results def showResultsScreen(self): self.drawResultScreen() @@ -294,6 +294,14 @@ def showIntroScreen(self): self.intro_text_stim.draw() return self.win.flip() + @property + def results(self): + """ + See getValidationResults(). + :return: + """ + return self._validation_results + def getValidationResults(self): """ Return the validation results dict for the last validation run. If a validation as not yet been run(), @@ -359,7 +367,7 @@ def getValidationResults(self): :return: validation results dict. """ - return self.validation_results + return self._validation_results def _createValidationResults(self): """ @@ -368,7 +376,7 @@ def _createValidationResults(self): :return: dict """ - self.validation_results = None + self._validation_results = None sample_array = self.targetsequence.getSampleMessageData() target_positions_used = self.targetsequence.positions.getPositions() @@ -529,8 +537,8 @@ def _createValidationResults(self): self.io.sendMessageEvent('Validation Report Complete', 'VALIDATION') - self.validation_results = results - return self.validation_results + self._validation_results = results + return self._validation_results def _generateImageName(self): import datetime From 2498b9b6ebaefae2364fdce96d8b1fbdc110e078 Mon Sep 17 00:00:00 2001 From: Sol Simpson Date: Mon, 12 Apr 2021 09:23:00 -0300 Subject: [PATCH 26/26] FF: updated iohub getDeviceNames now returns list of (manufacturer_name, iohub_device_name) pairs. 
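A short usage sketch of the new return format (the example entry is taken from the updated
docstring below; actual output depends on which device configs are installed):

    from psychopy.iohub.util import getDeviceNames
    # Each entry is now a (manufacturer_name, iohub_device_name) pair,
    # e.g. ('GazePoint', 'eyetracker.hw.gazepoint.gp3.EyeTracker').
    for manufacturer, device_path in getDeviceNames('eyetracker.hw'):
        print(manufacturer, '->', device_path)
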
getDeviceParams now returns data type for list element instead of 'list' --- psychopy/iohub/util/__init__.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/psychopy/iohub/util/__init__.py b/psychopy/iohub/util/__init__.py index 2b41abdb45d..204c3727d57 100644 --- a/psychopy/iohub/util/__init__.py +++ b/psychopy/iohub/util/__init__.py @@ -219,24 +219,30 @@ def getDeviceDefaultConfig(device_name, builder_hides=True): _iohub2builderInputType = dict(IOHUB_STRING='single', IOHUB_BOOL='bool', IOHUB_FLOAT='single', IOHUB_INT='single', IOHUB_LIST=('choice','multi'), IOHUB_COLOR='color', IOHUB_IP_ADDRESS_V4='single') -def getDeviceNames(device_name="eyetracker.hw"): +def getDeviceNames(device_name="eyetracker.hw", get_paths=True): """ - Return a list of iohub eye tracker device names, as would be used as keys to launchHubServer. + Return a list of iohub eye tracker device names, as would be used as keys to launchHubServer. If get_paths is true, + return both device manufacturer name (for display in builder) as well as iohub device name. Example: eyetrackers = getDeviceNames() print(eyetrackers) Output: - ['eyetracker.hw.gazepoint.gp3.EyeTracker', 'eyetracker.hw.sr_research.eyelink.EyeTracker', - 'eyetracker.hw.tobii.EyeTracker'] + [('GazePoint', 'eyetracker.hw.gazepoint.gp3.EyeTracker'), + ('MouseGaze', 'eyetracker.hw.mouse.EyeTracker'), + ('SR Research Ltd', 'eyetracker.hw.sr_research.eyelink.EyeTracker'), + ('Tobii Technology', 'eyetracker.hw.tobii.EyeTracker')] """ names = [] dconfigs = getDeviceDefaultConfig(device_name) for dcfg in dconfigs: - d_name = tuple(dcfg.keys())[0] - #d_name = d_name[:d_name.rfind('.')] - names.append(d_name) + d_path = tuple(dcfg.keys())[0] + d_config = tuple(dcfg.values())[0] + if get_paths is False: + names.append(d_path) + else: + names.append((d_config.get('manufacturer_name'), d_path)) return names def getDeviceFile(device_name, file_name): @@ -359,7 +365,7 @@ def settings2Params(parent_list, settings): slabel = slabel+k.replace("_", " ").title() if isinstance(sconfig_data, dict): - iohub_type, type_constraints =list(sconfig_data.items())[0] + iohub_type, type_constraints = list(sconfig_data.items())[0] builderValType = _iohub2builderValType[iohub_type] builderInputType = _iohub2builderInputType[iohub_type] valid_values = None @@ -369,6 +375,7 @@ def settings2Params(parent_list, settings): builderInputType = builderInputType[0] else: builderInputType = builderInputType[1] + builderValType = type(valid_values[0]) if valid_values: nv = dict(valType=builderValType, inputType=builderInputType, defaultVal=v, allowedVals=valid_values, hint=shint, label=slabel) @@ -376,7 +383,7 @@ def settings2Params(parent_list, settings): nv = dict(valType=builderValType, inputType=builderInputType, defaultVal=v, hint=shint, label=slabel) elif isinstance(sconfig_data, list): - nv = dict(valType='list', inputType='static', defaultVal=v, hint=shint, label=slabel) + nv = dict(valType=type(v), inputType='static', defaultVal=v, hint=shint, label=slabel) elif sconfig_data in _iohub2builderValType.keys(): nv = dict(valType=_iohub2builderValType[sconfig_data], inputType=_iohub2builderInputType[sconfig_data], defaultVal=v,