diff --git a/docs/source/api/iohub/device/default_yaml_configs/default_mousegaze_eyetracker.yaml b/docs/source/api/iohub/device/default_yaml_configs/default_mousegaze_eyetracker.yaml new file mode 100644 index 00000000000..1760d2d68b4 --- /dev/null +++ b/docs/source/api/iohub/device/default_yaml_configs/default_mousegaze_eyetracker.yaml @@ -0,0 +1,41 @@ +eyetracker.hw.mouse.EyeTracker: + # True = Automatically start reporting events for this device when the experiment starts. + # False = Do not start reporting events for this device until enableEventReporting(True) + # is called for the device. + auto_report_events: False + + # Should eye tracker events be saved to the ioHub DataStore file when the device + # is recording data ? + save_events: True + + # Should eye tracker events be sent to the Experiment process when the device + # is recording data ? + stream_events: True + + # How many eye events (including samples) should be saved in the ioHub event buffer before + # old eye events start being replaced by new events. When the event buffer reaches + # the maximum event length of the buffer defined here, older events will start to be dropped. + event_buffer_length: 1024 + runtime_settings: + # How many samples / second should Mousegaze Generate. + # 50 or 100 hz are supported. + sampling_rate: 50 + + # MouseGaze always generates Monocular Right eye samples. + track_eyes: RIGHT_EYE + + controls: + # Mouse Button used to make a MouseGaze position change. + # LEFT_BUTTON, MIDDLE_BUTTON, RIGHT_BUTTON. + move: RIGHT_BUTTON + + # Mouse Button(s) used to make MouseGaze generate a blink event. + # LEFT_BUTTON, MIDDLE_BUTTON, RIGHT_BUTTON. + blink: [LEFT_BUTTON, RIGHT_BUTTON] + + # Threshold for saccade generation. Specified in visual degrees. + saccade_threshold: 0.5 + + # MouseGaze creates (minimally populated) fixation, saccade, and blink events. 
+ monitor_event_types: [MonocularEyeSampleEvent, FixationStartEvent, FixationEndEvent, SaccadeStartEvent, SaccadeEndEvent, BlinkStartEvent, BlinkEndEvent] + diff --git a/docs/source/api/iohub/device/default_yaml_configs/default_tobii_eyetracker.yaml b/docs/source/api/iohub/device/default_yaml_configs/default_tobii_eyetracker.yaml index b7b1f7e6d7f..c1b60d96c59 100644 --- a/docs/source/api/iohub/device/default_yaml_configs/default_tobii_eyetracker.yaml +++ b/docs/source/api/iohub/device/default_yaml_configs/default_tobii_eyetracker.yaml @@ -36,12 +36,7 @@ eyetracker.hw.tobii.EyeTracker: serial_number: calibration: - # Should the PsychoPy Window created by the PsychoPy Process be minimized - # before displaying the Calibration Window created by the ioHub Process. - # - minimize_psychopy_win: False - - # The Tobii ioHub Common Eye Tracker Interface currently support + # The Tobii ioHub Common Eye Tracker Interface currently support # a 3, 5 and 9 point calibration mode. # THREE_POINTS,FIVE_POINTS,NINE_POINTS # @@ -150,7 +145,7 @@ eyetracker.hw.tobii.EyeTracker: runtime_settings: # The supported sampling rates for Tobii are model dependent. - # Using a defualt of 60 Hz, with the assumption it is the most common. + # Using a defualt of 60 Hz. sampling_rate: 60 # Tobii implementation supports BINOCULAR tracking mode only. diff --git a/docs/source/api/iohub/device/eyetracker.rst b/docs/source/api/iohub/device/eyetracker.rst index fd27b1a88f5..67c02ca0787 100644 --- a/docs/source/api/iohub/device/eyetracker.rst +++ b/docs/source/api/iohub/device/eyetracker.rst @@ -17,4 +17,5 @@ The following eye trackers are currently supported by iohub. 
GazePoint SR Research - Tobii \ No newline at end of file + Tobii + MouseGaze (Simulated Eye Tracker) \ No newline at end of file diff --git a/docs/source/api/iohub/device/eyetracker_interface/MouseGaze_Implementation_Notes.rst b/docs/source/api/iohub/device/eyetracker_interface/MouseGaze_Implementation_Notes.rst new file mode 100644 index 00000000000..6d16c73301d --- /dev/null +++ b/docs/source/api/iohub/device/eyetracker_interface/MouseGaze_Implementation_Notes.rst @@ -0,0 +1,150 @@ +########## +MouseGaze +########## + +MouseGaze simulates an eye tracker using the computer Mouse. + +**Platforms:** + +* Windows 7 / 10 +* Linux +* macOS + +**Required Python Version:** + +* Python 3.6 + + +**Supported Models:** + +* Any Mouse. ;) + +Additional Software Requirements +################################# + +None + +EyeTracker Class +################ + +.. autoclass:: psychopy.iohub.devices.eyetracker.hw.mouse.EyeTracker() + :members: runSetupProcedure, setRecordingState, enableEventReporting, isRecordingEnabled, getEvents, clearEvents, getLastSample, getLastGazePosition, getPosition, trackerTime, trackerSec, getConfiguration + +Supported Event Types +##################### + +MouseGaze generates monocular eye samples. A MonocularEyeSampleEvent +is created every 10 or 20 msec depending on the sampling_rate set +for the device. + +The following fields of the MonocularEyeSample event are supported: + +.. autoclass:: psychopy.iohub.devices.eyetracker.BinocularEyeSampleEvent(object) + + .. attribute:: time + + time of event, in sec.msec format, using psychopy timebase. + + .. attribute:: gaze_x + + The horizontal position of MouseGaze on the computer screen, + in Display Coordinate Type Units. Calibration must be done prior + to reading (meaningful) gaze data. + Uses Gazepoint LPOGX field. + + .. attribute:: gaze_y + + The vertical position of MouseGaze on the computer screen, + in Display Coordinate Type Units. 
Calibration must be done prior + to reading (meaningful) gaze data. + Uses Gazepoint LPOGY field. + + .. attribute:: left_pupil_measure_1 + + MouseGaze pupil diameter, static at 5 mm. + + .. attribute:: status + + Indicates if eye sample contains 'valid' position data. + 0 = MouseGaze position is valid. + 2 = MouseGaze position is missing (in simulated blink). + + +MouseGaze also creates basic fixation, saccade, and blink events +based on mouse event data. + +.. autoclass:: psychopy.iohub.devices.eyetracker.FixationStartEvent(object) + + .. attribute:: time + + time of event, in sec.msec format, using psychopy timebase. + + .. attribute:: eye + + EyeTrackerConstants.RIGHT_EYE. + + .. attribute:: gaze_x + + The horizontal 'eye' position on the computer screen + at the start of the fixation. Units are same as Window. + + + .. attribute:: gaze_y + + The vertical eye position on the computer screen + at the start of the fixation. Units are same as Window. + +.. autoclass:: psychopy.iohub.devices.eyetracker.FixationEndEvent(object) + + .. attribute:: time + + time of event, in sec.msec format, using psychopy timebase. + + .. attribute:: eye + + EyeTrackerConstants.RIGHT_EYE. + + .. attribute:: start_gaze_x + + The horizontal 'eye' position on the computer screen + at the start of the fixation. Units are same as Window. + + + .. attribute:: start_gaze_y + + The vertical 'eye' position on the computer screen + at the start of the fixation. Units are same as Window. + + .. attribute:: end_gaze_x + + The horizontal 'eye' position on the computer screen + at the end of the fixation. Units are same as Window. + + + .. attribute:: end_gaze_y + + The vertical 'eye' position on the computer screen + at the end of the fixation. Units are same as Window. + + .. attribute:: average_gaze_x + + Average calibrated horizontal eye position during the fixation, + specified in Display Units. + + .. 
attribute:: average_gaze_y + + Average calibrated vertical eye position during the fixation, + specified in Display Units. + + .. attribute:: duration + + Duration of the fixation in sec.msec format. + +Default Device Settings +####################### + +.. literalinclude:: ../default_yaml_configs/default_mousegaze_eyetracker.yaml + :language: yaml + + +**Last Updated:** March, 2021 + diff --git a/psychopy/demos/coder/iohub/eyetracking/gcCursor/iohub_config.yaml b/psychopy/demos/coder/iohub/eyetracking/gcCursor/iohub_config.yaml index 1ffb5ed03d4..d98f17b7c3c 100644 --- a/psychopy/demos/coder/iohub/eyetracking/gcCursor/iohub_config.yaml +++ b/psychopy/demos/coder/iohub/eyetracking/gcCursor/iohub_config.yaml @@ -114,7 +114,7 @@ monitor_devices: # # name will be considered as possible candidates for connection. # # If you only have one Tobii system connected to the computer, # # this field can just be left empty. -# model_name: Any Pro Model +# model_name: # # # The serial number of the Tobii device that you wish to connect to. # # If set, only the Tobii system matching that serial number will be diff --git a/psychopy/demos/coder/iohub/eyetracking/selectTracker/eyetracker_configs/tobii_config.yaml b/psychopy/demos/coder/iohub/eyetracking/selectTracker/eyetracker_configs/tobii_config.yaml index 26a11cc8bb9..be930ca955b 100644 --- a/psychopy/demos/coder/iohub/eyetracking/selectTracker/eyetracker_configs/tobii_config.yaml +++ b/psychopy/demos/coder/iohub/eyetracking/selectTracker/eyetracker_configs/tobii_config.yaml @@ -11,7 +11,7 @@ monitor_devices: # The model name of the Tobii device that you wish to connect to can be specified here, # and only Tobii systems matching that model name will be considered as possible candidates for connection. # If you only have one Tobii system connected to the computer, this field can just be left empty. 
- model_name: Any Pro Model + model_name: # The serial number of the Tobii device that you wish to connect to can be specified here, # and only the Tobii system matching that serial number will be connected to, if found. @@ -70,6 +70,5 @@ monitor_devices: expansion_speed: 30.0 # exapands at 30.0 pix / sec contract_only: True runtime_settings: - # The supported sampling rates for Tobii are model dependent. - # Using a default of 60 Hz, with the assumption it is the most common. - sampling_rate: 120 + # The supported sampling rates for Tobii are model dependent. + sampling_rate: 60 diff --git a/psychopy/demos/coder/iohub/eyetracking/simple.py b/psychopy/demos/coder/iohub/eyetracking/simple.py index 835340b2cbb..b0476a8d7d3 100644 --- a/psychopy/demos/coder/iohub/eyetracking/simple.py +++ b/psychopy/demos/coder/iohub/eyetracking/simple.py @@ -9,11 +9,12 @@ from psychopy import core, visual from psychopy.iohub import launchHubServer, EventConstants + # Eye tracker to use ('mouse', 'eyelink', 'gazepoint', or 'tobii') TRACKER = 'mouse' eyetracker_config = dict(name='tracker') -devices_config = {'Display': {'reporting_unit_type': 'pix', 'device_number': 0}} +devices_config = {} if TRACKER == 'eyelink': eyetracker_config['model_name'] = 'EYELINK 1000 DESKTOP' eyetracker_config['simulation_mode'] = False @@ -28,104 +29,109 @@ devices_config['eyetracker.hw.mouse.EyeTracker'] = eyetracker_config else: print("{} is not a valid TRACKER name; please use 'mouse', 'eyelink', 'gazepoint', or 'tobii'.".format(TRACKER)) + core.quit() # Number if 'trials' to run in demo TRIAL_COUNT = 2 # Maximum trial time / time timeout T_MAX = 60.0 - -if devices_config: - # Since no experiment or session code is given, no iohub hdf5 file - # will be saved, but device events are still available at runtime. - io = launchHubServer(**devices_config) - - # Get some iohub devices for future access. 
- keyboard = io.getDevice('keyboard') - display = io.getDevice('display') - tracker = io.getDevice('tracker') - - # print("display: ", display.getCoordinateType()) - - # run eyetracker calibration - tracker.runSetupProcedure() - - win = visual.Window(display.getPixelResolution(), - units=display.getCoordinateType(), - fullscr=True, - allowGUI=False - ) - - win.setMouseVisible(True) - - gaze_ok_region = visual.Circle(win, lineColor='black', radius=300, units='pix') - - gaze_dot = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0), - size=(40, 40), color='green', units='pix') - - text_stim_str = 'Eye Position: %.2f, %.2f. In Region: %s\n' - text_stim_str += 'Press space key to start next trial.' - missing_gpos_str = 'Eye Position: MISSING. In Region: No\n' - missing_gpos_str += 'Press space key to start next trial.' - text_stim = visual.TextStim(win, text=text_stim_str, - pos=[0, 0], height=24, - color='black', units='pix', - wrapWidth=win.size[0] * .9) - - # Run Trials..... - t = 0 - while t < TRIAL_COUNT: - io.clearEvents() - tracker.setRecordingState(True) - run_trial = True - tstart_time = core.getTime() - while run_trial is True: - # Get the latest gaze position in dispolay coord space.. - gpos = tracker.getLastGazePosition() - for evt in tracker.getEvents(): - if evt.type != EventConstants.MONOCULAR_EYE_SAMPLE: - print(evt) - # Update stim based on gaze position - valid_gaze_pos = isinstance(gpos, (tuple, list)) - gaze_in_region = valid_gaze_pos and gaze_ok_region.contains(gpos) - if valid_gaze_pos: - # If we have a gaze position from the tracker, update gc stim - # and text stim. 
- if gaze_in_region: - gaze_in_region = 'Yes' - else: - gaze_in_region = 'No' - text_stim.text = text_stim_str % (gpos[0], gpos[1], gaze_in_region) - - gaze_dot.setPos(gpos) +win = visual.Window((1920, 1080), + units='pix', + fullscr=True, + allowGUI=False, + monitor='55w_60dist' + ) + +win.setMouseVisible(False) + + +# Since no experiment or session code is given, no iohub hdf5 file +# will be saved, but device events are still available at runtime. +io = launchHubServer(window=win, **devices_config) + + +# Get some iohub devices for future access. +keyboard = io.getDevice('keyboard') +display = io.getDevice('display') +tracker = io.getDevice('tracker') + +win.winHandle.minimize() # minimize the PsychoPy window + +# run eyetracker calibration +tracker.runSetupProcedure() + +win.winHandle.maximize() # maximize the PsychoPy window +win.winHandle.activate() + +gaze_ok_region = visual.Circle(win, lineColor='black', radius=300, units='pix') + +gaze_dot = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0), + size=(40, 40), color='green', units='pix') + +text_stim_str = 'Eye Position: %.2f, %.2f. In Region: %s\n' +text_stim_str += 'Press space key to start next trial.' +missing_gpos_str = 'Eye Position: MISSING. In Region: No\n' +missing_gpos_str += 'Press space key to start next trial.' +text_stim = visual.TextStim(win, text=text_stim_str, + pos=[0, 0], height=24, + color='black', units='pix', + wrapWidth=win.size[0] * .9) + +# Run Trials..... +t = 0 +while t < TRIAL_COUNT: + io.clearEvents() + tracker.setRecordingState(True) + run_trial = True + tstart_time = core.getTime() + while run_trial is True: + # Get the latest gaze position in dispolay coord space.. 
+ gpos = tracker.getLastGazePosition() + for evt in tracker.getEvents(): + if evt.type != EventConstants.MONOCULAR_EYE_SAMPLE: + print(evt) + # Update stim based on gaze position + valid_gaze_pos = isinstance(gpos, (tuple, list)) + gaze_in_region = valid_gaze_pos and gaze_ok_region.contains(gpos) + if valid_gaze_pos: + # If we have a gaze position from the tracker, update gc stim + # and text stim. + if gaze_in_region: + gaze_in_region = 'Yes' else: - # Otherwise just update text stim - text_stim.text = missing_gpos_str - - # Redraw stim - gaze_ok_region.draw() - text_stim.draw() - if valid_gaze_pos: - gaze_dot.draw() - - # Display updated stim on screen. - flip_time = win.flip() - - # Check any new keyboard char events for a space key. - # If one is found, set the trial end variable. - # - if keyboard.getPresses(keys=' '): - run_trial = False - elif core.getTime()-tstart_time > T_MAX: - run_trial = False - win.flip() - # Current Trial is Done - # Stop eye data recording - tracker.setRecordingState(False) - t += 1 - - # All Trials are done - # End experiment - tracker.setConnectionState(False) - - io.quit() + gaze_in_region = 'No' + text_stim.text = text_stim_str % (gpos[0], gpos[1], gaze_in_region) + + gaze_dot.setPos(gpos) + else: + # Otherwise just update text stim + text_stim.text = missing_gpos_str + + # Redraw stim + gaze_ok_region.draw() + text_stim.draw() + if valid_gaze_pos: + gaze_dot.draw() + + # Display updated stim on screen. + flip_time = win.flip() + + # Check any new keyboard char events for a space key. + # If one is found, set the trial end variable. 
+ # + if keyboard.getPresses(keys=' '): + run_trial = False + elif core.getTime()-tstart_time > T_MAX: + run_trial = False + win.flip() + # Current Trial is Done + # Stop eye data recording + tracker.setRecordingState(False) + t += 1 + +# All Trials are done +# End experiment +tracker.setConnectionState(False) + +io.quit() core.quit() diff --git a/psychopy/demos/coder/iohub/eyetracking/validation.py b/psychopy/demos/coder/iohub/eyetracking/validation.py new file mode 100644 index 00000000000..0788fb98170 --- /dev/null +++ b/psychopy/demos/coder/iohub/eyetracking/validation.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Example of performing eye tracker validation using the ioHub Common Eye Tracker interface +and the psychopy.iohub.client.eyetracker.validation.ValidationProcedure class. +""" +import time +from psychopy import visual +from psychopy.iohub import launchHubServer +from psychopy.iohub.client.eyetracker.validation import TargetStim, ValidationProcedure, PositionGrid + +if __name__ == "__main__": + # Create a default PsychoPy Window + # monitor *must* be the name of a valid PsychoPy Monitor config file. + win = visual.Window((1920, 1080), fullscr=True, allowGUI=False, monitor='55w_60dist') + + # Create ioHub Server config .... + sess_code = 'S_{0}'.format(int(time.mktime(time.localtime()))) + iohub_config = dict(experiment_code='validation_demo', session_code=sess_code) + # Add an eye tracker device + iohub_config['eyetracker.hw.mouse.EyeTracker'] = dict(name='tracker') + + # Start the ioHub process. + io = launchHubServer(window=win, **iohub_config) + + # Get the eye tracker device. + tracker = io.devices.tracker + + # Run eyetracker calibration + r = tracker.runSetupProcedure() + + # ValidationProcedure setup + + # Create a target stim. iohub.client.eyetracker.validation.TargetStim provides a standard doughnut style + # target. Or use any stim that has `.setPos()`, `.setRadius()`, and `.draw()` methods. 
+ target_stim = TargetStim(win, radius=0.025, fillcolor=[.5, .5, .5], edgecolor=[-1, -1, -1], edgewidth=2, + dotcolor=[1, -1, -1], dotradius=0.005, units='norm', colorspace='rgb') + + # target_positions: Provide your own list of validation positions, + target_positions = [(0.0, 0.0), (0.85, 0.85), (-0.85, 0.0), (0.85, 0.0), (0.85, -0.85), (-0.85, 0.85), + (-0.85, -0.85), (0.0, 0.85), (0.0, -0.85)] + # or use the PositionGrid class to generate a set. + #target_positions = PositionGrid(bounds=[-.85, .85, .85, -.85], shape=(3, 3), firstposindex=4, repeatFirstPos=True) + + # Create a validation procedure, iohub must already be running with an + # eye tracker device, or errors will occur. + validation_proc = ValidationProcedure(win, + target=target_stim, + positions=target_positions, + randomize_positions=True, + target_animation=dict(velocity=1.0, + expandedscale=3.0, + expansionduration=0.2, + contractionduration=0.4), + accuracy_period_start=0.550, + accuracy_period_stop=.150, + show_intro_screen=True, + intro_text='Eye Tracker Validation Procedure.', + show_results_screen=True, + results_in_degrees=True, + save_results_screen=True, + toggle_gaze_cursor_key='g', + terminate_key='escape') + + # Run the validation procedure. run() does not return until the validation is complete. 
+ validation_proc.run() + if validation_proc.results: + results = validation_proc.results + print("++++ Validation Results ++++") + print("Passed:", results['passed']) + print("failed_pos_count:", results['positions_failed_processing']) + print("Units:", results['reporting_unit_type']) + print("min_error:", results['min_error']) + print("max_error:", results['max_error']) + print("mean_error:", results['mean_error']) + else: + print("Validation Aborted by User.") + io.quit() diff --git a/psychopy/iohub/__init__.py b/psychopy/iohub/__init__.py index ab5bdc8dbce..6374cec4510 100644 --- a/psychopy/iohub/__init__.py +++ b/psychopy/iohub/__init__.py @@ -1,8 +1,7 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/changes.txt b/psychopy/iohub/changes.txt index e37a04620bd..4a4c05aa74e 100644 --- a/psychopy/iohub/changes.txt +++ b/psychopy/iohub/changes.txt @@ -7,14 +7,20 @@ Changes made to iohub for 2021.2 Release - Do we need to list every specific setting change? - iohub can no longer be copied out of the psychopy package and used as a stand alone package in your site-packages folder. Change `import iohub` to `import psychopy.iohub` for example. -- Removed most device settings related to device hardware specifics (model_id, manfacture_date, etc) +- Removed most device settings related to device hardware specifics (model_id, manufacturer_date, etc) since they were never really used. - removed use of .iohpid - ** Changed datastore schema: - increased experiment and session data string max length - Added wintab device for Wacom based digital pens. (Windows only) - Added iohub MouseGaze eye tracker simulator. 
-- Added 'color_space' setting to Display (bridging to remove Display class from iohub) +- Added 'color_space' setting to Display settings. - Eyelink and tobii calibration gfx can now use same color space as window instead of always rgb255. - TODO: Retest all eyetracking examples with all eye trackers on all OS's -- Removed iohub/devices/display/unit_conversions.py. Moving to psychopy monitorutil functions. \ No newline at end of file +- Removed iohub/devices/display/unit_conversions.py. Moving to psychopy monitorutil functions. +- launchHubServer now accepts a psychopy window using the window kwarg. + iohub display info is updated using window information like .monitor, .colorSpace, .units, .screen +- If psychopy window (with monitor config) is passed to launchHubServer, no need for iohub Display config. +- Added common eye tracker interface validation procedure. Coder Demo: demos/coder/iohub/eyetracking/validation.py + - TODO: Retest all eye trackers +- Updated psychopy.iohub API docs. \ No newline at end of file diff --git a/psychopy/iohub/client/__init__.py b/psychopy/iohub/client/__init__.py index a6ac548bb0f..b2d718b102d 100644 --- a/psychopy/iohub/client/__init__.py +++ b/psychopy/iohub/client/__init__.py @@ -1,8 +1,7 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function from past.builtins import unicode diff --git a/psychopy/iohub/client/connect.py b/psychopy/iohub/client/connect.py index 61ef8fbb8c7..d02c270d49f 100644 --- a/psychopy/iohub/client/connect.py +++ b/psychopy/iohub/client/connect.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. 
-# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function import os diff --git a/psychopy/iohub/client/eyetracker/__init__.py b/psychopy/iohub/client/eyetracker/__init__.py new file mode 100644 index 00000000000..41998c05094 --- /dev/null +++ b/psychopy/iohub/client/eyetracker/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). \ No newline at end of file diff --git a/psychopy/iohub/client/eyetracker/validation/__init__.py b/psychopy/iohub/client/eyetracker/validation/__init__.py new file mode 100644 index 00000000000..05b0145c570 --- /dev/null +++ b/psychopy/iohub/client/eyetracker/validation/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). + +from .posgrid import PositionGrid +from .trigger import Trigger, KeyboardTrigger, DeviceEventTrigger, TimeTrigger +from .procedure import TargetStim, ValidationProcedure \ No newline at end of file diff --git a/psychopy/iohub/client/eyetracker/validation/posgrid.py b/psychopy/iohub/client/eyetracker/validation/posgrid.py new file mode 100644 index 00000000000..c294c2212b0 --- /dev/null +++ b/psychopy/iohub/client/eyetracker/validation/posgrid.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). 
+ +import numpy as np +from psychopy.iohub.client import ioHubConnection + + +class PositionGrid(object): + def __init__(self, + bounds=None, + shape=None, # Defines the number of columns and rows of + # positions needed. If shape is an array of + # two elements, it defines the col,row shape + # for position layout. Position count will + # equal rows*cols. If shape is a single + # int, the position grid col,row shape will + # be shape x shape. + posCount=None, # Defines the number of positions to create + # without any col,row position constraint. + leftMargin=None, # Specify the minimum valid horz position. + rightMargin=None, # Limit horz positions to be < max horz + # position minus rightMargin. + topMargin=None, # Limit vert positions to be < max vert + # position minus topMargin. + bottomMargin=None, # Specify the minimum valid vert position. + scale=1.0, # Scale can be one or two numbers, each + # between 0.0 and 1.0. If a tuple is + # provided, it represents the horz, vert + # scale to be applied to window width, + # height. If a single number is + # given, the same scale will be applied to + # both window width and height. The scaled + # window size is centered on the original + # window size to define valid position area. + posList=None, # Provide an existing list of (x,y) + # positions. If posList is provided, the + # shape, posCount, margin and scale arg's + # are ignored. + noiseStd=None, # Add a random shift to each position based + # on a normal distribution with mean = 0.0 + # and sigma equal to noiseStd. Specify + # value based on units being used. + firstposindex=0, # Specify which position in the position + # list should be displayed first. This + # position is not effected by randomization. + repeatFirstPos=True # If the first position in the list should + # be provided as the last position as well, + # set to True. In this case, the number of + # positions returned will be position + # count + 1. 
False indicated the first + # position should not be repeated. + ): + """ + PositionGrid provides a flexible way to generate a set of x,y position + values within the boundaries of the psychopy window object provided. + + The class provides a set of arguments that represent commonly needed + constraints when creating a target position list, supporting a + variety of position arrangements. + + PositionGrid supports the len() function, and returns the number of + positions generated based on the supplied parameters. If repeatFirstPos + is true, len(posgrid) == number of unique positions + 1 (a repeat of the + first position value). + + PositionGrid is a generator, so the normal way to access the positions from + the class is to use a for loop or with statement: + + posgrid = PositionGrid(....) + for pos in posgrid: + # do something cool with the pos + print(pos) + + :param bounds: + :param shape: + :param posCount: + :param leftMargin: + :param rightMargin: + :param topMargin: + :param bottomMargin: + :param scale: + :param posList: + :param noiseStd: + :param firstposindex: + :param repeatFirstPos: + """ + self.posIndex = 0 + self.positions = None + self.posOffsets = None + + self.bounds = bounds + if self.bounds is None: + self.bounds = ioHubConnection.getActiveConnection().devices.display.getCoordBounds() + + winSize = self.bounds[2] - self.bounds[0], self.bounds[3] - self.bounds[1] + self.firstposindex = firstposindex + + self.repeatfirstpos = repeatFirstPos + + self.horzStd, self.vertStd = None, None + if noiseStd: + if hasattr(noiseStd, '__len__'): + self.horzStd, self.vertStd = noiseStd + else: + self.horzStd, self.vertStd = noiseStd, noiseStd + + horzScale, vertScale = None, None + if scale: + if hasattr(scale, '__len__'): + horzScale, vertScale = scale + else: + horzScale, vertScale = scale, scale + + rowCount, colCount = None, None + if shape: + if hasattr(shape, '__len__'): + colCount, rowCount = shape + else: + rowCount, colCount = shape, shape + + if 
posList: + # User has provided the target positions, use posList to set + # self.positions as array of x,y pairs. + if len(posList) == 2 and len(posList[0]) != 2 and len(posList[0]) == len(posList[1]): + # positions were provided in ((x1,x2,..,xn),(y1,y2,..,yn)) + # format + self.positions = np.column_stack((posList[0], posList[1])) + elif len(posList[0]) == 2: + self.positions = np.asarray(posList) + else: + raise ValueError('PositionGrid posList kwarg must be in ((x1,y1),(x2,y2),..,(xn,yn))' + ' or ((x1,x2,..,xn),(y1,y2,..,yn)) format') + + if self.positions is None and (posCount or (rowCount and colCount)): + # Auto generate position list based on criteria + # provided. + if winSize is not None: + pixw, pixh = winSize + xmin = 0.0 + xmax = 1.0 + ymin = 0.0 + ymax = 1.0 + + if leftMargin: + if leftMargin < pixw: + xmin = leftMargin / pixw + else: + raise ValueError('PositionGrid leftMargin kwarg must be < winSize[0]') + if rightMargin: + if rightMargin < pixw: + xmax = 1.0 - rightMargin / pixw + else: + raise ValueError('PositionGrid rightMargin kwarg must be < winSize[0]') + if topMargin: + if topMargin < pixh: + ymax = 1.0 - topMargin / pixh + else: + raise ValueError('PositionGrid topMargin kwarg must be < winSize[1]') + if bottomMargin: + if bottomMargin < pixh: + ymin = bottomMargin / pixh + else: + raise ValueError('PositionGrid bottomMargin kwarg must be < winSize[1]') + + if horzScale: + if 0.0 < horzScale <= 1.0: + xmin += (1.0 - horzScale) / 2.0 + xmax -= (1.0 - horzScale) / 2.0 + else: + raise ValueError('PositionGrid horzScale kwarg must be 0.0 > horzScale <= 1.0') + + if vertScale: + if 0.0 < vertScale <= 1.0: + ymin += (1.0 - vertScale) / 2.0 + ymax -= (1.0 - vertScale) / 2.0 + else: + raise ValueError('PositionGrid vertScale kwarg must be 0.0 > vertScale <= 1.0') + if posCount: + colCount = int(np.sqrt(posCount)) + rowCount = colCount + xps = np.random.uniform(xmin, xmax, colCount) * pixw - pixw / 2.0 + yps = np.random.uniform(ymin, ymax, rowCount) 
def __len__(self):
    """Return the number of target positions, including the optional
    repeat of the first position (see ``repeatfirstpos``)."""
    if self.repeatfirstpos:
        return len(self.positions) + 1
    return len(self.positions)


def randomize(self):
    """
    Randomize the positions within the position list. If a first position
    index was provided, randomization only occurs for positions[1:].

    This can be called multiple times if the same position list is being used
    repeatedly and a random presentation order is needed.

    Each time randomize() is called, if noiseStd is != 0, a new set of
    normally distributed offsets are created for the target positions.
    """
    if not self.firstposindex:
        np.random.shuffle(self.positions)
    else:
        # Pin the first position, shuffle the remainder, then re-attach it.
        firstpos = self.positions[0]
        self.positions = np.delete(self.positions, 0, 0)
        np.random.shuffle(self.positions)
        self.positions = np.insert(self.positions, 0, firstpos, 0)
    self._generatePosOffsets()


def _generatePosOffsets(self):
    """Create a new set of position displacement 'noise' offsets based on the
    horzStd / vertStd values given when the object was initialized."""
    horz_offsets = np.zeros((len(self), 1))
    if self.horzStd:
        horz_offsets = np.random.normal(0.0, self.horzStd, len(self))
    vert_offsets = np.zeros((len(self), 1))
    if self.vertStd:
        vert_offsets = np.random.normal(0.0, self.vertStd, len(self))
    # BUGFIX: posOffsets must be stored as (x, y) columns to match
    # self.positions (which is column_stack'ed as (x, y)). The previous
    # (vert, horz) ordering applied horizontal noise to y and vertical
    # noise to x when the two are added together in next().
    self.posOffsets = np.column_stack((horz_offsets, vert_offsets))


def __iter__(self):
    return self


def __next__(self):
    # Python 3 iterator protocol; delegates to next().
    return self.next()


def next(self):
    """Return the next (noise-offset) target position.

    Usually not called directly; instead iterate the PositionGrid:

        pos = PositionGrid(....)
        for p in pos:
            # do something cool with it
            pass

    Raises StopIteration (and resets the internal index) once the sequence,
    plus the optional repeat of the first position, is exhausted.
    """
    if self.posIndex < len(self.positions):
        pos = self.positions[self.posIndex] + self.posOffsets[self.posIndex]
        self.posIndex += 1
        return pos
    if self.repeatfirstpos and self.posIndex == len(self.positions):
        # Re-present the first position (with its offset) at the end.
        pos = self.positions[0] + self.posOffsets[0]
        self.posIndex += 1
        return pos
    self.posIndex = 0
    raise StopIteration()


def getPositions(self):
    """Return all positions (with offsets applied) as a list."""
    return [p for p in self]
# Copyright (C) 2012-2021 iSolver Software Solutions
# Distributed under the terms of the GNU General Public License (GPL).
"""
Eye Tracker Validation procedure using the ioHub common eye tracker interface.

To use the validation process from within a Coder script:
* Create a target stim, using TargetStim, or any stim class that has a `.setPos()`, `setRadius()`, and `.draw()` method.
* Create a list of validation target positions. Use the `PositionGrid` class to help create a target position list.
* Create a ValidationProcedure class instance, providing the target stim and position list and other arguments
  to define details of the validation procedure.
* Use `ValidationProcedure.run()` to perform the validation routine.
* Use `ValidationProcedure.getValidationResults()` to access information about each target position displayed and
  the events collected during the each target validation period.

See demos/coder/iohub/eyetracking/validation.py for a complete example.
"""
from weakref import proxy
import numpy as np
from time import sleep
import os
import sys
from matplotlib import pyplot as pl

from psychopy import visual
from psychopy.iohub.util import win32MessagePump, normjoin
from psychopy.iohub.constants import EventConstants
from psychopy.iohub.client import ioHubConnection, Computer
from psychopy.tools.monitorunittools import convertToPix
from psychopy.tools.monitorunittools import pix2deg, deg2pix

from psychopy.iohub.client.eyetracker.validation import PositionGrid, Trigger, KeyboardTrigger

getTime = Computer.getTime


class TargetStim(object):
    def __init__(self, win, radius=None, fillcolor=None, edgecolor=None, edgewidth=None,
                 dotcolor=None, dotradius=None, units=None, colorspace=None, opacity=1.0, contrast=1.0):
        """
        TargetStim is a 'doughnut' style target graphic used during the validation procedure.

        :param win: Window being used for validation.
        :param radius: The outer radius of the target.
        :param fillcolor: The color used to fill the target body.
        :param edgecolor: The color for the edge around the target.
        :param edgewidth: The thickness of the target outer edge (always in pixels).
        :param dotcolor: The color of the central target dot.
        :param dotradius: The radius to use for the target dot.
        :param units: The psychopy unit type of any size values.
        :param colorspace: The psychopy color space of any colors.
        :param opacity: The transparency of the target (0.0 - 1.0).
        :param contrast: The contrast of the target stim.
        """
        # FIX: removed redundant function-local `from weakref import proxy`;
        # the module-level import is used instead.
        self.win = proxy(win)
        self.stim = []
        self.radius = radius
        outer = visual.Circle(self.win, radius=radius, fillColor=fillcolor, lineColor=edgecolor, lineWidth=edgewidth,
                              edges=32, units=units, colorSpace=colorspace, opacity=opacity,
                              contrast=contrast, interpolate=True, autoLog=False)
        self.stim.append(outer)

        # Only create a separate center dot when it would actually be visible
        # (a dot the same color as the fill would be invisible).
        if dotcolor and dotcolor != fillcolor:
            centerdot = visual.Circle(self.win, radius=dotradius, fillColor=dotcolor, lineColor=dotcolor,
                                      lineWidth=0.0, edges=32, interpolate=True, units=units,
                                      colorSpace=colorspace, opacity=opacity, contrast=contrast, autoLog=False)
            self.stim.append(centerdot)

    def setPos(self, pos):
        """
        Set the center position of the target stim. Used during validation procedure to
        change target position.
        """
        for s in self.stim:
            s.setPos(pos)

    def setRadius(self, r):
        """
        Update the radius of the target stim. (Optionally) used during validation procedure to
        expand / contract the target stim.
        """
        self.stim[0].radius = r

    def draw(self):
        """
        Draw the Target stim.
        """
        for s in self.stim:
            s.draw()

    def contains(self, p):
        """
        Is point p contained within the Target Stim?

        :param p: x, y position in stim units
        :return: bool: True: p is within the stim
        """
        return self.stim[0].contains(p)
class ValidationProcedure(object):
    def __init__(self, win=None, target=None, positions=None, target_animation=None, randomize_positions=True,
                 background=None, triggers=None, storeeventsfor=None, accuracy_period_start=0.550,
                 accuracy_period_stop=.150, show_intro_screen=True, intro_text='Ready to Start Validation Procedure.',
                 show_results_screen=True, results_in_degrees=False, save_results_screen=False,
                 terminate_key="escape", toggle_gaze_cursor_key="g"):
        """
        ValidationProcedure is used to test the gaze accuracy of a calibrated eye tracking system.

        Once a ValidationProcedure instance has been created, call `.run()` to start the
        validation process, which consists of the following steps:

        1) (Optionally) Display an Introduction screen. A 'space' key press starts target presentation.
        2) Display the validation target at each position being validated. Target progression from one
           position to the next is controlled by the specified `triggers`, defaulting to a 'space' key press.
           The target can simply jump from one position to the next, or optional target_animation settings
           can make the target move across the screen and/or expand / contract at each location.
        3) (Optionally) Display a Results screen showing each target position, the samples used for the
           accuracy calculation, and summary validation statistics.

        Data collected during the target sequence is used to calculate accuracy information for each
        target position. The raw data as well as the computed accuracy stats are available via
        `.getValidationResults()`.

        To make the validation output consistent across iohub common eye tracker implementations,
        validation is performed on monocular eye data. If binocular samples are being recorded,
        the average of the left and right eye positions is used for each gaze sample.

        See the validation.py demo in demos.coder.iohub.eyetracking for a complete usage example.

        :param win: PsychoPy window to use for validation. Must be full screen.
        :param target: Stimulus to use as validation target. Must support `.setPos()`, `.setRadius()`
                       and `.draw()` (e.g. `TargetStim`).
        :param positions: Positions to validate. Provide list of x,y pairs, or use a `PositionGrid` class.
        :param target_animation: dict of target animation settings (velocity, expansion / contraction).
                                 Default: no animation.
        :param randomize_positions: bool: Randomize target positions before presentation.
        :param background: color: background color of validation screen.
        :param show_intro_screen: bool: Display a validation procedure Introduction screen.
        :param intro_text: Introduction screen text.
        :param show_results_screen: bool: Display a validation procedure Results screen.
        :param results_in_degrees: bool: Convert results to visual degrees.
        :param save_results_screen: bool: Save results screen as image.
        :param terminate_key: Key that will end the validation procedure. Default is 'escape'.
        :param toggle_gaze_cursor_key: Key to toggle gaze cursor visibility (hidden to start). Default is 'g'.
        :param accuracy_period_start: Time prior to target trigger to use as start of period for valid samples.
        :param accuracy_period_stop: Time prior to target trigger to use as end of period for valid samples.
        :param triggers: Target progression triggers. Default is 'space' key press.
        :param storeeventsfor: iohub devices that events should be stored for.
        """
        self.terminate_key = terminate_key
        self.toggle_gaze_cursor_key = toggle_gaze_cursor_key

        self.io = ioHubConnection.getActiveConnection()

        # Accept a plain list / tuple of (x, y) pairs as well as a PositionGrid.
        if isinstance(positions, (list, tuple)):
            positions = PositionGrid(posList=positions, firstposindex=0, repeatFirstPos=False)
        self.positions = positions

        self.randomize_positions = randomize_positions
        if self.randomize_positions:
            self.positions.randomize()
        self.win = proxy(win)
        # BUGFIX: the default for target_animation was a mutable dict ({}),
        # which is shared across all instances. Default is now None.
        if target_animation is None:
            target_animation = {}
        self.animation_params = target_animation
        self.accuracy_period_start = accuracy_period_start
        self.accuracy_period_stop = accuracy_period_stop
        self.show_intro_screen = show_intro_screen
        self.intro_text = intro_text
        self.intro_text_stim = None
        self.show_results_screen = show_results_screen
        self.results_in_degrees = results_in_degrees
        self.save_results_screen = save_results_screen
        self._validation_results = None
        if storeeventsfor is None:
            storeeventsfor = [self.io.devices.keyboard,
                              self.io.devices.mouse,
                              self.io.devices.tracker,
                              self.io.devices.experiment
                              ]

        if triggers is None:
            # Use space key press as default target trigger
            triggers = KeyboardTrigger(' ', on_press=True)
        triggers = Trigger.getTriggersFrom(triggers)

        # Create the ValidationTargetRenderer instance; used to control the sequential
        # presentation of the target at each of the grid positions.
        self.targetsequence = ValidationTargetRenderer(win, target=target, positions=self.positions,
                                                       background=background,
                                                       triggers=triggers, storeeventsfor=storeeventsfor,
                                                       terminate_key=terminate_key,
                                                       gaze_cursor_key=toggle_gaze_cursor_key)

    def run(self):
        """
        Run the validation procedure, returning after the full validation process is complete, including:
        a) display of an instruction screen
        b) display of the target position sequence used for validation data collection.
        c) display of a validation accuracy results plot.
        """
        keyboard = self.io.devices.keyboard
        if self.show_intro_screen:
            # Display Validation Intro Screen
            self.showIntroScreen()
            if self.terminate_key and self.terminate_key in keyboard.waitForReleases(keys=[' ', self.terminate_key]):
                print("Escape key pressed. Exiting validation")
                self._validation_results = None
                return

        # Perform Validation.....
        terminate = not self.targetsequence.display(**self.animation_params)
        if terminate:
            print("Escape key pressed. Exiting validation")
            self._validation_results = None
            return

        self.io.clearEvents('all')

        self._createValidationResults()

        if self.show_results_screen:
            self.showResultsScreen()
            kb_presses = keyboard.waitForPresses(keys=[' ', self.terminate_key, self.targetsequence.gaze_cursor_key])
            # Stay on the results screen until 'space' (continue) or the
            # terminate key; the gaze cursor key toggles sample display.
            while ' ' not in kb_presses:
                if self.targetsequence.gaze_cursor_key in kb_presses:
                    self.targetsequence.display_gaze = not self.targetsequence.display_gaze
                    self.showResultsScreen()
                if self.terminate_key in kb_presses:
                    print("Escape key pressed. Exiting validation")
                    break
                kb_presses = keyboard.waitForPresses(keys=[' ',
                                                           self.terminate_key,
                                                           self.targetsequence.gaze_cursor_key])

        return self._validation_results
+ """ + keyboard = self.io.devices.keyboard + if self.show_intro_screen: + # Display Validation Intro Screen + self.showIntroScreen() + if self.terminate_key and self.terminate_key in keyboard.waitForReleases(keys=[' ', self.terminate_key]): + print("Escape key pressed. Exiting validation") + self._validation_results = None + return + + # Perform Validation..... + terminate = not self.targetsequence.display(**self.animation_params) + if terminate: + print("Escape key pressed. Exiting validation") + self._validation_results = None + return + + self.io.clearEvents('all') + + self._createValidationResults() + + if self.show_results_screen: + self.showResultsScreen() + kb_presses = keyboard.waitForPresses(keys=[' ', self.terminate_key, self.targetsequence.gaze_cursor_key]) + while ' ' not in kb_presses: + if self.targetsequence.gaze_cursor_key in kb_presses: + self.targetsequence.display_gaze = not self.targetsequence.display_gaze + self.showResultsScreen() + if self.terminate_key in kb_presses: + print("Escape key pressed. Exiting validation") + break + kb_presses = keyboard.waitForPresses(keys=[' ', + self.terminate_key, + self.targetsequence.gaze_cursor_key]) + + return self._validation_results + + def showResultsScreen(self): + self.drawResultScreen() + ftime = self.win.flip() + if self.save_results_screen: + self.win.getMovieFrame() + self.win.saveMovieFrames(self._generateImageName()) + return ftime + + def showIntroScreen(self): + text = self.intro_text + '\nPress SPACE to Start....' 
+ textpos = (0, 0) + if self.intro_text_stim: + self.intro_text_stim.setText(text) + self.intro_text_stim.setPos(textpos) + else: + self.intro_text_stim = visual.TextStim(self.win, text=text, pos=textpos, height=30, color=(0, 0, 0), + colorSpace='rgb255', opacity=1.0, contrast=1.0, units='pix', + ori=0.0, antialias=True, bold=False, italic=False, + anchorHoriz='center', anchorVert='center', + wrapWidth=self.win.size[0] * .8) + + self.intro_text_stim.draw() + return self.win.flip() + + @property + def results(self): + """ + See getValidationResults(). + :return: + """ + return self._validation_results + + def getValidationResults(self): + """ + Return the validation results dict for the last validation run. If a validation as not yet been run(), + None is returned. Validation results are provided separately for each target position and include: + + a) An array of the samples used for the accuracy calculation. The samples used are selected + using the following criteria: + i) Only samples where the target was stationary and not expanding or contracting are selected. + ii) Samples are selected that fall between: + + start_time_filter = last_sample_time - accuracy_period_start + + and + + end_time_filter = last_sample_time - accuracy_period_end + + Therefore, the duration of the selected sample period is: + + selection_period_dur = end_time_filter - start_time_filter + + iii) Sample that contain missing / invalid position data are removed, providing the + final set of samples used for accuracy calculations. The min, max, and mean values + from each set of selected samples is calculated. + + b) The x and y error of sampled gaze position relative to the current target position. + This data is in the same units as is used by the validation window. + + c) The xy distance error from the from each eye's gaze position to the target position. + This is also calculated as an average of both eyes when binocular data is available. 
    def _createValidationResults(self):
        """
        Create the validation results dict and save validation analysis info as
        experiment messages (category 'VALIDATION') to the iohub .hdf5 file.

        :return: dict
        """
        self._validation_results = None
        sample_array = self.targetsequence.getSampleMessageData()
        target_positions_used = self.targetsequence.positions.getPositions()

        if self.results_in_degrees:
            # Convert target / gaze positions to visual degrees (via pixels).
            for postdat in sample_array:
                postdat['targ_pos_x'], postdat['targ_pos_y'] = toDeg(self.win,
                                                                    *toPix(self.win, postdat['targ_pos_x'],
                                                                           postdat['targ_pos_y']))

                if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE:
                    postdat['left_eye_x'], postdat['left_eye_y'] = toDeg(self.win,
                                                                        *toPix(self.win, postdat['left_eye_x'],
                                                                               postdat['left_eye_y']))

                    postdat['right_eye_x'], postdat['right_eye_y'] = toDeg(self.win,
                                                                          *toPix(self.win, postdat['right_eye_x'],
                                                                                 postdat['right_eye_y']))
                else:
                    postdat['eye_x'], postdat['eye_y'] = toDeg(self.win,
                                                               *toPix(self.win, postdat['eye_x'], postdat['eye_y']))

        # Running accumulators for the overall (all positions) error stats.
        min_error = 100000.0
        max_error = 0.0
        summed_error = 0.0
        point_count = 0

        self.io.sendMessageEvent('Results', 'VALIDATION')
        results = dict(display_units=self.win.units, display_bounds=self.positions.bounds,
                       display_pix=self.win.size, position_count=len(sample_array),
                       target_positions=target_positions_used)

        for k, v in results.items():
            self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION')

        results['position_results'] = []
        results['positions_failed_processing'] = 0

        for pindex, samplesforpos in enumerate(sample_array):
            self.io.sendMessageEvent('Target Position Results: {0}'.format(pindex), 'VALIDATION')

            # Only samples taken while the target was stationary are considered.
            stationary_samples = samplesforpos[samplesforpos['targ_state'] == self.targetsequence.TARGET_STATIONARY]

            if len(stationary_samples):
                last_stime = stationary_samples[-1]['eye_time']
                first_stime = stationary_samples[0]['eye_time']

                filter_stime = last_stime - self.accuracy_period_start
                filter_etime = last_stime - self.accuracy_period_stop

                all_samples_in_period = stationary_samples[stationary_samples['eye_time'] >= filter_stime]
                all_samples_in_period = all_samples_in_period[all_samples_in_period['eye_time'] < filter_etime]

                # 'good' samples are those with a 0 (no error) eye status.
                good_samples_in_period = all_samples_in_period[all_samples_in_period['eye_status'] == 0]

                all_samples_count = all_samples_in_period.shape[0]
                good_sample_count = good_samples_in_period.shape[0]
                try:
                    good_sample_ratio = good_sample_count / float(all_samples_count)
                except ZeroDivisionError:
                    good_sample_ratio = 0
            else:
                # NOTE(review): when stationary_samples is empty, first_stime /
                # last_stime / filter_stime / filter_etime are left unbound (or
                # stale from a previous loop iteration) but are referenced in
                # position_results below — confirm this path cannot occur, or
                # guard it.
                all_samples_in_period = []
                good_samples_in_period = []
                good_sample_ratio = 0

            # Dictionary of the different levels of samples selected during filtering
            # for valid samples to use in accuracy calculations.
            sample_msg_data_filtering = dict(all_samples=samplesforpos,  # All samples from target period.
                                             # Sample during stationary period at end of target
                                             # presentation display.
                                             stationary_samples=stationary_samples,
                                             # Samples that occurred within the
                                             # defined time selection period.
                                             time_filtered_samples=all_samples_in_period,
                                             # Samples from the selection period that
                                             # do not have missing data
                                             used_samples=good_samples_in_period)

            position_results = dict(index=pindex,
                                    target_position=target_positions_used[pindex],
                                    sample_time_range=[first_stime, last_stime],
                                    filter_samples_time_range=[filter_stime, filter_etime],
                                    valid_filtered_sample_perc=good_sample_ratio)

            for k, v in position_results.items():
                self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION')

            position_results['sample_from_filter_stages'] = sample_msg_data_filtering

            position_results2 = dict()
            if int(good_sample_ratio * 100) == 0:
                # No usable samples for this position; mark it failed.
                position_results2['calculation_status'] = 'FAILED'
                results['positions_failed_processing'] += 1
            else:
                target_x = good_samples_in_period[:]['targ_pos_x']
                target_y = good_samples_in_period[:]['targ_pos_y']

                if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE:
                    # Per-eye gaze-to-target distance, then averaged across eyes.
                    left_x = good_samples_in_period[:]['left_eye_x']
                    left_y = good_samples_in_period[:]['left_eye_y']
                    left_error_x = target_x - left_x
                    left_error_y = target_y - left_y
                    left_error_xy = np.hypot(left_error_x, left_error_y)

                    right_x = good_samples_in_period[:]['right_eye_x']
                    right_y = good_samples_in_period[:]['right_eye_y']
                    right_error_x = target_x - right_x
                    right_error_y = target_y - right_y
                    right_error_xy = np.hypot(right_error_x, right_error_y)

                    lr_error = (right_error_xy + left_error_xy) / 2.0
                    lr_error_max = lr_error.max()
                    lr_error_min = lr_error.min()
                    lr_error_mean = lr_error.mean()
                    lr_error_std = np.std(lr_error)
                    min_error = min(min_error, lr_error_min)
                    max_error = max(max_error, lr_error_max)
                    summed_error += lr_error_mean
                    point_count += 1.0
                else:
                    # Monocular samples: single gaze-to-target distance.
                    eye_x = good_samples_in_period[:]['eye_x']
                    eye_y = good_samples_in_period[:]['eye_y']
                    error_x = target_x - eye_x
                    error_y = target_y - eye_y
                    error_xy = np.hypot(error_x, error_y)
                    lr_error = error_xy
                    lr_error_max = lr_error.max()
                    lr_error_min = lr_error.min()
                    lr_error_mean = lr_error.mean()
                    lr_error_std = np.std(lr_error)
                    min_error = min(min_error, lr_error_min)
                    max_error = max(max_error, lr_error_max)
                    summed_error += lr_error_mean
                    point_count += 1.0

                position_results2['calculation_status'] = 'PASSED'
                position_results2['min_error'] = lr_error_min
                position_results2['max_error'] = lr_error_max
                position_results2['mean_error'] = lr_error_mean
                position_results2['stdev_error'] = lr_error_std
            for k, v in position_results2.items():
                self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION')
                position_results[k] = v
            results['position_results'].append(position_results)
            self.io.sendMessageEvent('Done Target Position Results : {0}'.format(pindex), 'VALIDATION')

        unit_type = self.win.units
        if self.results_in_degrees:
            unit_type = 'degree'
        mean_error = summed_error / point_count
        err_results = dict(reporting_unit_type=unit_type, min_error=min_error, max_error=max_error,
                           mean_error=mean_error, passed=results['positions_failed_processing'] == 0,
                           positions_failed_processing=results['positions_failed_processing'])

        for k, v in err_results.items():
            self.io.sendMessageEvent('{}: {}'.format(k, v), 'VALIDATION')
            results[k] = v

        self.io.sendMessageEvent('Validation Report Complete', 'VALIDATION')

        self._validation_results = results
        return self._validation_results

    def _generateImageName(self):
        """Return a timestamped .png path (next to the running script) for the
        saved results-screen image."""
        import datetime
        file_name = 'validation_' + datetime.datetime.now().strftime('%d_%m_%Y_%H_%M') + '.png'
        rootScriptPath = os.path.dirname(sys.argv[0])
        return normjoin(rootScriptPath, file_name)

    def drawResultScreen(self):
        """
        Draw validation results screen: targets, summary statistics, and the
        per-position gaze samples / error text.
        :return:
        """

        results = self.getValidationResults()

        # Draw the target stim at each validated position.
        for tp in self.positions.getPositions():
            self.targetsequence.target.setPos(tp)
            self.targetsequence.target.draw()

        title_txt = 'Validation Results\nMin: %.4f, Max: %.4f,' \
                    ' Mean %.4f (%s units)' % (results['min_error'], results['max_error'],
                                               results['mean_error'], results['reporting_unit_type'])
        title_stim = visual.TextStim(self.win, text=title_txt, height=24, pos=(0.0, (self.win.size[1] / 2.0) * .95),
                                     color=(0, 0, 0), colorSpace='rgb255', units='pix', antialias=True,
                                     anchorVert='center', anchorHoriz='center', wrapWidth=self.win.size[0] * .8)
        title_stim.draw()

        exit_text = visual.TextStim(self.win, text='Press SPACE to continue.', opacity=1.0, units='pix', height=None,
                                    pos=(0.0, -(self.win.size[1] / 2.0) * .95), color=(0, 0, 0), colorSpace='rgb255',
                                    antialias=True, bold=True, anchorVert='center', anchorHoriz='center',
                                    wrapWidth=self.win.size[0] * .8)
        exit_text.draw()

        # One distinct color per target position, mapped into psychopy rgb (-1..1).
        color_list = pl.cm.tab20b(np.linspace(0, 1, (len(results['position_results']))))
        # draw eye samples
        ci = 0
        for position_results in results['position_results']:
            color = color_list[ci] * 2.0 - 1.0
            utype = 'pix'
            target_x, target_y = position_results['target_position']

            sample_gfx_radius = deg2pix(0.33, self.win.monitor, correctFlat=False)
            if self.results_in_degrees:
                sample_gfx_radius = 0.33
                utype = 'deg'
            sample_gfx = visual.Circle(self.win, radius=sample_gfx_radius, fillColor=color, lineColor=[1, 1, 1],
                                       lineWidth=1, edges=64, units=utype, colorSpace='rgb', opacity=0.66,
                                       interpolate=True, autoLog=False)

            if position_results['calculation_status'] == 'FAILED':
                position_txt = "Failed"
                txt_bold = True
                position_txt_color = "red"
            else:
                samples = position_results['sample_from_filter_stages']['used_samples']
                if self.targetsequence.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE:
                    # Use the average of both eye positions for display.
                    gaze_x = (samples[:]['left_eye_x'] + samples[:]['right_eye_x']) / 2.0
                    gaze_y = (samples[:]['left_eye_y'] + samples[:]['right_eye_y']) / 2.0
                else:
                    gaze_x = samples[:]['eye_x']
                    gaze_y = samples[:]['eye_y']

                for i in range(len(gaze_x)):
                    if self.results_in_degrees:
                        g_pos = gaze_x[i], gaze_y[i]
                    else:
                        g_pos = toPix(self.win, gaze_x[i], gaze_y[i])
                        g_pos = g_pos[0][0], g_pos[1][0]
                    sample_gfx.setPos(g_pos)
                    sample_gfx.draw()
                txt_bold = False
                position_txt = "Gaze Error:\nMin: %.4f\nMax: %.4f\n" \
                               "Avg: %.4f\nStdev: %.4f" % (position_results['min_error'],
                                                           position_results['max_error'],
                                                           position_results['mean_error'],
                                                           position_results['stdev_error'])
                position_txt_color = "green"

            # NOTE(review): per-position stats text is only drawn while the gaze
            # cursor display is toggled on — confirm this gating is intended.
            if self.targetsequence.display_gaze:
                text_pix_pos = toPix(self.win, target_x, target_y)
                text_pix_pos = text_pix_pos[0][0], text_pix_pos[1][0]
                target_text_stim = visual.TextStim(self.win, text=position_txt, units='pix', pos=text_pix_pos,
                                                   height=21, color=position_txt_color, antialias=True,
                                                   bold=txt_bold, anchorVert='center', anchorHoriz='center')
                target_text_stim.draw()
            ci += 1
class ValidationTargetRenderer(object):
    # Target state flags recorded with each sample (see getSampleMessageData).
    TARGET_STATIONARY = 1
    TARGET_MOVING = 2
    TARGET_EXPANDING = 4
    TARGET_CONTRACTING = 8
    # Experiment Message text field types and tokens
    message_types = dict(BEGIN_SEQUENCE=('BEGIN_SEQUENCE', '', int),
                         DONE_SEQUENCE=('DONE_SEQUENCE', '', int),
                         NEXT_POS_TRIG=('NEXT_POS_TRIG', '', int, float),
                         START_DRAW=('START_DRAW', ',', int, float, float, float, float),
                         SYNCTIME=('SYNCTIME', ',', int, float, float, float, float),
                         EXPAND_SIZE=('EXPAND_SIZE', '', float, float),
                         CONTRACT_SIZE=('CONTRACT_SIZE', '', float, float),
                         POS_UPDATE=('POS_UPDATE', ',', float, float),
                         TARGET_POS=('TARGET_POS', ',', float, float))
    max_msg_type_length = max([len(s) for s in message_types.keys()])
    # BUGFIX: np.int and np.str were deprecated aliases of the builtins and were
    # removed in NumPy >= 1.24; use the builtin int / str in the dtype specs.
    binocular_sample_message_element = [('targ_pos_ix', int),
                                        ('last_msg_time', np.float64),
                                        ('last_msg_type', str, max_msg_type_length),
                                        ('next_msg_time', np.float64),
                                        ('next_msg_type', str, max_msg_type_length),
                                        ('targ_pos_x', np.float64),
                                        ('targ_pos_y', np.float64),
                                        ('targ_state', int),
                                        ('eye_time', np.float64),
                                        ('eye_status', int),
                                        ('left_eye_x', np.float64),
                                        ('left_eye_y', np.float64),
                                        ('left_pupil_size', np.float64),
                                        ('right_eye_x', np.float64),
                                        ('right_eye_y', np.float64),
                                        ('right_pupil_size', np.float64)]
    monocular_sample_message_element = [('targ_pos_ix', int),
                                        ('last_msg_time', np.float64),
                                        ('last_msg_type', str, max_msg_type_length),
                                        ('next_msg_time', np.float64),
                                        ('next_msg_type', str, max_msg_type_length),
                                        ('targ_pos_x', np.float64),
                                        ('targ_pos_y', np.float64),
                                        ('targ_state', int),
                                        ('eye_time', np.float64),
                                        ('eye_status', int),
                                        ('eye_x', np.float64),
                                        ('eye_y', np.float64),
                                        ('pupil_size', np.float64)]

    def __init__(self, win, target, positions, background=None, storeeventsfor=None, triggers=None, msgcategory='',
                 io=None, terminate_key='escape', gaze_cursor_key='g'):
        """
        ValidationTargetRenderer is an internal class used by `ValidationProcedure`.

        psychopy.iohub.client.eyetracker.validation.Trigger based classes are used
        to define the criteria used to start displaying the next target position graphics.
        By providing a set of DeviceEventTriggers, complex criteria for
        target position pacing can be defined.

        iohub devices can be provided in the storeeventsfor keyword argument.
        Events which occur during each target position presentation period are
        stored and are available at the end of the display() period, grouped by
        position index and device event types.

        :param win: PsychoPy window used for target presentation.
        :param target: Target stim (supports setPos / setRadius / draw).
        :param positions: PositionGrid of target positions.
        :param background: Optional background stim drawn before the target.
        :param storeeventsfor: iohub devices to store events for. Default: none.
        :param triggers: Trigger instances controlling target advancement.
        :param msgcategory: Category used for experiment messages sent to iohub.
        :param io: ioHubConnection; defaults to the active connection.
        """
        self.terminate_key = terminate_key
        self.gaze_cursor_key = gaze_cursor_key
        self.display_gaze = False
        gc_size = deg2pix(3.0, win.monitor, correctFlat=False)
        self.gaze_cursor = visual.GratingStim(win, tex=None, mask='gauss', pos=(0, 0), size=(gc_size, gc_size),
                                              color='green', units='pix', opacity=0.8)
        self._terminate_requested = False
        self.win = proxy(win)
        self.target = target
        self.background = background
        self.positions = positions
        # BUGFIX: storeeventsfor previously defaulted to a shared mutable list ([]).
        if storeeventsfor is None:
            storeeventsfor = []
        self.storeevents = storeeventsfor
        self.msgcategory = msgcategory

        if io is None:
            io = ioHubConnection.getActiveConnection()
        self.io = io
        self._keyboard = self.io.devices.keyboard

        # If storeevents is True, targetdata will be a list of dict's.
        # Each dict, among other things, contains all ioHub events that occurred
        # from when a target was first presented at a position, to when the
        # the wait period completed for that position.
        #
        self.targetdata = []
        self.triggers = triggers
    def _draw(self):
        """
        Fill the window with the specified background color and draw the
        target stim. When gaze display is toggled on and the tracker reports a
        valid gaze position, also draw the gaze cursor there.
        """
        if self.background:
            self.background.draw()
        self.target.draw()
        if self.display_gaze:
            gpos = self.io.devices.tracker.getLastGazePosition()
            # getLastGazePosition returns a non-sequence when gaze is invalid.
            valid_gaze_pos = isinstance(gpos, (tuple, list))
            if valid_gaze_pos:
                pix_pos = toPix(self.win, *gpos)
                pix_pos = pix_pos[0][0], pix_pos[1][0]
                self.gaze_cursor.setPos(pix_pos)
                self.gaze_cursor.draw()

    def _animateTarget(self, topos, frompos, **kwargs):
        """
        Any logic related to drawing the target at the new screen position,
        including any intermediate animation effects, is done here.

        Return the flip time when the target was first drawn at the newpos
        location (0 if termination was requested mid-animation).
        """
        io = self.io
        if frompos is not None:
            velocity = kwargs.get('velocity')
            if velocity:
                # Move the target from frompos to topos at the given velocity,
                # interpolating position each frame until arrival time.
                starttime = getTime()
                a, b = np.abs(topos - frompos) ** 2
                duration = np.sqrt(a + b) / velocity
                arrivetime = duration + starttime
                fliptime = starttime
                while fliptime < arrivetime:
                    mu = (fliptime - starttime) / duration
                    tpos = frompos * (1.0 - mu) + topos * mu
                    self.target.setPos(frompos * (1.0 - mu) + topos * mu)
                    self._draw()
                    fliptime = self.win.flip()
                    io.sendMessageEvent('POS_UPDATE %.4f,%.4f' % (tpos[0], tpos[1]), self.msgcategory,
                                        sec_time=fliptime)
                    self._addDeviceEvents()
                    if self._terminate_requested:
                        return 0

        # Draw the target at its final position and timestamp the flip.
        self.target.setPos(topos)
        self._draw()
        fliptime = self.win.flip()
        io.sendMessageEvent('TARGET_POS %.4f,%.4f' % (topos[0], topos[1]), self.msgcategory, sec_time=fliptime)
        self._addDeviceEvents()

        expandedscale = kwargs.get('expandedscale')
        expansionduration = kwargs.get('expansionduration')
        contractionduration = kwargs.get('contractionduration')

        initialradius = self.target.radius
        if expandedscale:
            expandedradius = self.target.radius * expandedscale

            if expansionduration:
                # Linearly expand the target radius over expansionduration.
                starttime = fliptime
                expandedtime = fliptime + expansionduration
                while fliptime < expandedtime:
                    mu = (fliptime - starttime) / expansionduration
                    cradius = initialradius * (1.0 - mu) + expandedradius * mu
                    self.target.setRadius(cradius)
                    self._draw()
                    fliptime = self.win.flip()
                    io.sendMessageEvent('EXPAND_SIZE %.4f %.4f' % (cradius, initialradius), self.msgcategory,
                                        sec_time=fliptime)
                    self._addDeviceEvents()
                    if self._terminate_requested:
                        return 0
            if contractionduration:
                # Linearly contract back to the initial radius.
                starttime = fliptime
                contractedtime = fliptime + contractionduration
                while fliptime < contractedtime:
                    mu = (fliptime - starttime) / contractionduration
                    cradius = expandedradius * (1.0 - mu) + initialradius * mu
                    self.target.setRadius(cradius)
                    self._draw()
                    fliptime = self.win.flip()
                    io.sendMessageEvent('CONTRACT_SIZE %.4f %.4f' % (cradius, initialradius), self.msgcategory,
                                        sec_time=fliptime)
                    self._addDeviceEvents()
                    if self._terminate_requested:
                        return 0

        # Ensure the radius is restored regardless of animation settings.
        self.target.setRadius(initialradius)
        return fliptime

    def moveTo(self, topos, frompos, **kwargs):
        """
        Indicates that the target should be moved frompos to topos.

        If a PositionGrid has been provided, moveTo should not be called
        directly. Instead, use the display() method to start the full
        target position presentation sequence.
        """
        io = self.io
        # (-1, -1) marks 'no previous position' in the START_DRAW / SYNCTIME messages.
        fpx, fpy = -1, -1
        if frompos is not None:
            fpx, fpy = frompos[0], frompos[1]
        io.sendMessageEvent('START_DRAW %d %.4f,%.4f %.4f,%.4f' % (self.positions.posIndex, fpx, fpy, topos[0],
                                                                   topos[1]), self.msgcategory)
        fliptime = self._animateTarget(topos, frompos, **kwargs)
        io.sendMessageEvent('SYNCTIME %d %.4f,%.4f %.4f,%.4f' % (self.positions.posIndex, fpx, fpy, topos[0], topos[1]),
                            self.msgcategory, sec_time=fliptime)

        # wait for trigger to fire
        last_pump_time = fliptime
        trig_fired = self._hasTriggerFired(start_time=fliptime)
        while not trig_fired:
            # Pump OS messages at most every 250 ms to keep the window responsive.
            if getTime() - last_pump_time >= 0.250:
                win32MessagePump()
                last_pump_time = getTime()

            if self.display_gaze:
                self._draw()
                self.win.flip()
            else:
                sleep(0.001)

            if self._checkForTerminate():
                return
            self._checkForToggleGaze()
            trig_fired = self._hasTriggerFired(start_time=fliptime)
+ """ + io = self.io + fpx, fpy = -1, -1 + if frompos is not None: + fpx, fpy = frompos[0], frompos[1] + io.sendMessageEvent('START_DRAW %d %.4f,%.4f %.4f,%.4f' % (self.positions.posIndex, fpx, fpy, topos[0], + topos[1]), self.msgcategory) + fliptime = self._animateTarget(topos, frompos, **kwargs) + io.sendMessageEvent('SYNCTIME %d %.4f,%.4f %.4f,%.4f' % (self.positions.posIndex, fpx, fpy, topos[0], topos[1]), + self.msgcategory, sec_time=fliptime) + + # wait for trigger to fire + last_pump_time = fliptime + trig_fired = self._hasTriggerFired(start_time=fliptime) + while not trig_fired: + if getTime() - last_pump_time >= 0.250: + win32MessagePump() + last_pump_time = getTime() + + if self.display_gaze: + self._draw() + self.win.flip() + else: + sleep(0.001) + + if self._checkForTerminate(): + return + self._checkForToggleGaze() + trig_fired = self._hasTriggerFired(start_time=fliptime) + + def _hasTriggerFired(self, **kwargs): + """ + Used internally to know when one of the triggers has occurred and + the target should move to the next target position. + """ + # wait for trigger to fire + triggered = None + for trig in self.triggers: + if trig.triggered(**kwargs): + triggered = trig + self._addDeviceEvents(trig.clearEventHistory(True)) + if triggered: + break + + if triggered: + # by default, assume it was a timer trigger,so use 255 as 'event type' + event_type_id = 255 + trig_evt = triggered.getTriggeringEvent() + if hasattr(trig_evt, 'type'): + # actually it was a device event trigger + event_type_id = trig_evt.type + # get time trigger of trigger event + event_time = triggered.getTriggeringTime() + self.io.sendMessageEvent('NEXT_POS_TRIG %d %.3f' % (event_type_id, event_time), self.msgcategory) + for trig in self.triggers: + trig.resetTrigger() + return triggered + + def _initTargetData(self, frompos, topos): + """ + Internally used to create the data structure used to store position + information and events which occurred during each target position + period. 
+ """ + if self.storeevents: + deviceevents = {} + for device in self.storeevents: + deviceevents[device] = [] + self.targetdata.append(dict(frompos=frompos, topos=topos, events=deviceevents)) + + def _addDeviceEvents(self, device_event_dict={}): + if self._checkForTerminate(): + return + self._checkForToggleGaze() + dev_event_buffer = self.targetdata[-1]['events'] + for dev, dev_events in dev_event_buffer.items(): + if dev in device_event_dict: + dev_events.extend(device_event_dict[dev]) + else: + dev_events.extend(dev.getEvents()) + + def _checkForTerminate(self): + keys = self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) + for k in keys: + if k.key == self.terminate_key: + self._terminate_requested = True + break + return self._terminate_requested + + def _checkForToggleGaze(self): + keys = self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=False) + for k in keys: + if k.key == self.gaze_cursor_key: + # get (clear) the event so it does not trigger multiple times. + self._keyboard.getEvents(EventConstants.KEYBOARD_PRESS, clearEvents=True) + self.display_gaze = not self.display_gaze + self._draw() + self.win.flip() + return self.display_gaze + return self.display_gaze + + def display(self, **kwargs): + """ + Display the target at each point in the position grid, performing + target animation if requested. The target then holds position until one + of the specified triggers occurs, resulting in the target moving to the + next position in the positiongrid. + + To setup target animation between grid positions, the following keyword + arguments are supported. If an option is not specified, the animation + related to it is not preformed. + + velocity: The rate (units / second) at which the target should move + from a current target position to the next target position. + The value should be in the unit type the target stimulus + is using. 
+
+        expandedscale: When a target stimulus is at the current grid position,
+                       the target graphic can expand to a size equal to the
+                       original target radius * expandedscale.
+
+        expansionduration: If expandedscale has been specified, this option is
+                           used to set how long it should take for the target to
+                           reach the full expanded target size. Time is in sec.
+
+        contractionduration: If a target has been expanded, this option is used
+                             to specify how many seconds it should take for the
+                             target to contract back to the original target
+                             radius.
+
+        Note that target expansion and contraction change the target stimulus
+        outer diameter only. The edge thickness and central dot radius do not
+        change.
+
+        All movement and size changes are linear in fashion.
+
+        For example, to display a static target at each grid position::
+
+            targetsequence.display()
+
+        To have the target stim move between each grid position
+        at 400 pixels / sec and not expand or contract::
+
+            targetsequence.display(velocity=400.0)
+
+        If the target should jump from one grid position to the next, and then
+        expand to twice the radius over a 0.5 second period::
+
+            targetsequence.display(
+                expandedscale=2.0,
+                expansionduration=0.50
+            )
+
+        To do a similar animation as the previous example, but also have the
+        target contract back to its original size over 0.75 seconds::
+
+            targetsequence.display(
+                expandedscale=2.0,
+                expansionduration=0.50,
+                contractionduration=0.75
+            )
+
+        When this method returns, the target has been displayed at all
+        positions. Data collected for each position period can be accessed via
+        the targetdata attribute.
+ """ + del self.targetdata[:] + prevpos = None + + io = self.io + io.clearEvents('all') + io.sendMessageEvent('BEGIN_SEQUENCE {0}'.format(len(self.positions.positions)), self.msgcategory) + turn_rec_off = [] + for d in self.storeevents: + if not d.isReportingEvents(): + d.enableEventReporting(True) + turn_rec_off.append(d) + + sleep(0.025) + for pos in self.positions: + self._initTargetData(prevpos, pos) + self._addDeviceEvents() + if self._terminate_requested: + break + self.moveTo(pos, prevpos, **kwargs) + prevpos = pos + self._addDeviceEvents() + if self._terminate_requested: + break + + for d in turn_rec_off: + d.enableEventReporting(False) + + if self._terminate_requested: + io.sendMessageEvent('VALIDATION TERMINATED BY USER', self.msgcategory) + return False + + io.sendMessageEvent('DONE_SEQUENCE {0}'.format(len(self.positions.positions)), self.msgcategory) + sleep(0.025) + self._addDeviceEvents() + io.clearEvents('all') + return True + + def _processMessageEvents(self): + self.target_pos_msgs = [] + self.saved_pos_samples = [] + for pd in self.targetdata: + events = pd.get('events') + + # create a dict of device labels as keys, device events as value + devlabel_events = {} + for k, v in events.items(): + devlabel_events[k.getName()] = v + + samples = devlabel_events.get('tracker', []) + # remove any eyetracker events that are not samples + samples = [s for s in samples if s.type in (EventConstants.BINOCULAR_EYE_SAMPLE, + EventConstants.MONOCULAR_EYE_SAMPLE)] + self.saved_pos_samples.append(samples) + + self.sample_type = self.saved_pos_samples[0][0].type + if self.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + self.sample_msg_dtype = self.binocular_sample_message_element + else: + self.sample_msg_dtype = self.monocular_sample_message_element + + messages = devlabel_events.get('experiment', []) + msg_lists = [] + for m in messages: + temp = m.text.strip().split() + msg_type = self.message_types.get(temp[0]) + if msg_type: + current_msg = [m.time, 
m.category] + if msg_type[1] == ',': + for t in temp: + current_msg.extend(t.split(',')) + else: + current_msg.extend(temp) + + for mi, dtype in enumerate(msg_type[2:]): + current_msg[mi + 3] = dtype(current_msg[mi + 3]) + + msg_lists.append(current_msg) + + if msg_lists[0][2] == 'NEXT_POS_TRIG': + # handle case where the trigger msg from the previous target + # message was not read until the start of the next pos. + # In which case, move msg to end of previous targ pos msgs + npm = msg_lists.pop(0) + self.target_pos_msgs[-1].append(npm) + + self.target_pos_msgs.append(msg_lists) + + for i in range(len(self.target_pos_msgs)): + self.target_pos_msgs[i] = np.asarray(self.target_pos_msgs[i], dtype=object) + + return self.target_pos_msgs + + def getSampleMessageData(self): + """ + Return a list of numpy ndarrays, each containing joined eye sample + and previous / next experiment message data for the sample's time. + """ + # preprocess message events + self._processMessageEvents() + + # inline func to return sample field array based on sample namedtup + def getSampleData(s): + sampledata = [s.time, s.status] + if self.sample_type == EventConstants.BINOCULAR_EYE_SAMPLE: + sampledata.extend((s.left_gaze_x, s.left_gaze_y, s.left_pupil_measure1, + s.right_gaze_x, s.right_gaze_y, s.right_pupil_measure1)) + return sampledata + + sampledata.extend((s.gaze_x, s.gaze_y, s.pupil_measure1)) + return sampledata + + current_target_pos = -1.0, -1.0 + current_targ_state = 0 + target_pos_samples = [] + for pindex, samples in enumerate(self.saved_pos_samples): + last_msg, messages = self.target_pos_msgs[pindex][0], self.target_pos_msgs[pindex][1:] + samplesforposition = [] + pos_sample_count = len(samples) + si = 0 + for current_msg in messages: + last_msg_time = last_msg[0] + last_msg_type = last_msg[2] + if last_msg_type == 'START_DRAW': + if not current_targ_state & self.TARGET_STATIONARY: + current_targ_state += self.TARGET_STATIONARY + current_targ_state -= current_targ_state & 
self.TARGET_MOVING + current_targ_state -= current_targ_state & self.TARGET_EXPANDING + current_targ_state -= current_targ_state & self.TARGET_CONTRACTING + elif last_msg_type == 'EXPAND_SIZE': + if not current_targ_state & self.TARGET_EXPANDING: + current_targ_state += self.TARGET_EXPANDING + current_targ_state -= current_targ_state & self.TARGET_CONTRACTING + elif last_msg_type == 'CONTRACT_SIZE': + if not current_targ_state & self.TARGET_CONTRACTING: + current_targ_state += self.TARGET_CONTRACTING + current_targ_state -= current_targ_state & self.TARGET_EXPANDING + elif last_msg_type == 'TARGET_POS': + current_target_pos = float(last_msg[3]), float(last_msg[4]) + current_targ_state -= current_targ_state & self.TARGET_MOVING + if not current_targ_state & self.TARGET_STATIONARY: + current_targ_state += self.TARGET_STATIONARY + elif last_msg_type == 'POS_UPDATE': + current_target_pos = float(last_msg[3]), float(last_msg[4]) + if not current_targ_state & self.TARGET_MOVING: + current_targ_state += self.TARGET_MOVING + current_targ_state -= current_targ_state & self.TARGET_STATIONARY + elif last_msg_type == 'SYNCTIME': + if not current_targ_state & self.TARGET_STATIONARY: + current_targ_state += self.TARGET_STATIONARY + current_targ_state -= current_targ_state & self.TARGET_MOVING + current_targ_state -= current_targ_state & self.TARGET_EXPANDING + current_targ_state -= current_targ_state & self.TARGET_CONTRACTING + current_target_pos = float(last_msg[6]), float(last_msg[7]) + + while si < pos_sample_count: + sample = samples[si] + if last_msg_time <= sample.time < current_msg[0]: + sarray = [pindex, last_msg_time, last_msg_type, + current_msg[0], current_msg[2], + current_target_pos[0], current_target_pos[1], + current_targ_state] + sarray.extend(getSampleData(sample)) + sndarray = np.asarray(tuple(sarray), dtype=self.sample_msg_dtype) + samplesforposition.append(sndarray) + si += 1 + elif sample.time >= current_msg[0]: + break + else: + si += 1 + last_msg = 
current_msg
+
+            possamples = np.asanyarray(samplesforposition)
+            target_pos_samples.append(possamples)
+
+        # So we now have a list len == number target positions. Each element
+        # of the list is a list of all eye sample / message data for a
+        # target position. Each element of the data list for a single target
+        # position is itself a list that contains combined info about
+        # an eye sample and message info valid for when the sample time was.
+        return np.asanyarray(target_pos_samples, dtype=object)
+
+
+def toPix(win, x, y):
+    """Returns the stim's position in pixels,
+    based on its pos, units, and win.
+    """
+    try:
+        xy = np.zeros((len(x), 2))
+    except TypeError:
+        xy = np.zeros((1, 2))
+
+    xy[:, 0] = x
+    xy[:, 1] = y
+    r = convertToPix(np.asarray((0, 0)), xy, win.units, win)
+    return r[:, 0], r[:, 1]
+
+
+def toDeg(win, x, y):
+    try:
+        xy = np.zeros((len(x), 2))
+    except TypeError:
+        xy = np.zeros((1, 2))
+    xy[:, 0] = x
+    xy[:, 1] = y
+    r = pix2deg(xy, win.monitor, correctFlat=False)
+    return r[:, 0], r[:, 1]
diff --git a/psychopy/iohub/client/eyetracker/validation/trigger.py b/psychopy/iohub/client/eyetracker/validation/trigger.py
new file mode 100644
index 00000000000..1a1483c5cf8
--- /dev/null
+++ b/psychopy/iohub/client/eyetracker/validation/trigger.py
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+# Part of the psychopy.iohub library.
+# Copyright (C) 2012-2021 iSolver Software Solutions
+# Distributed under the terms of the GNU General Public License (GPL).
+ +from psychopy import core +from psychopy.iohub.constants import EventConstants +from psychopy.iohub.client import ioHubConnection + +getTime = core.getTime + + +class Trigger(object): + io = None + + def __init__(self, trigger_function=lambda a, b, c: True, user_kwargs={}, repeat_count=0): + Trigger.io = ioHubConnection.getActiveConnection() + self.trigger_function = trigger_function + self.user_kwargs = user_kwargs + self._last_triggered_event = None + self._last_triggered_time = None + self.repeat_count = repeat_count + self.triggered_count = 0 + + def triggered(self, **kwargs): + if 0 <= self.repeat_count < self.triggered_count: + return False + return True + + def getTriggeringEvent(self): + return self._last_triggered_event + + def getTriggeringTime(self): + return self._last_triggered_time + + def getTriggeredStateCallback(self): + return self.trigger_function, self.user_kwargs + + def resetLastTriggeredInfo(self): + self._last_triggered_event = None + self._last_triggered_time = None + + def resetTrigger(self): + self.resetLastTriggeredInfo() + self.triggered_count = 0 + + @classmethod + def getEventBuffer(cls, copy=False): + return {} + + @classmethod + def clearEventHistory(cls, returncopy=False): + if returncopy: + return {} + + @classmethod + def getTriggersFrom(cls, triggers): + """ + Returns a list of Trigger instances generated based on the contents of the + input triggers. + + :param triggers: + :return: + """ + # Handle different valid trigger object types + if isinstance(triggers, (list, tuple)): + # Support is provided for a list of Trigger objects or a list of + # strings. + t1 = triggers[0] + if isinstance(t1, str): + # triggers is a list of strings, so try and create a list of + # DeviceEventTrigger's using keyboard device, KEYBOARD_RELEASE + # event type, and the triggers list elements each as the + # event.key. 
+ kbtriggers = [] + for c in triggers: + kbtriggers.append(KeyboardTrigger(c, on_press=False)) + trig_list = kbtriggers + else: + # Assume triggers is a list of Trigger objects + trig_list = triggers + elif isinstance(triggers, (int, float)): + # triggers is a number, so assume a TimeTrigger is wanted where + # the delay == triggers. start time will be the fliptime of the + # last update for drawing to the new target position. + trig_list = (TimeTrigger(start_time=None, delay=triggers),) + elif isinstance(triggers, str): + # triggers is a string, so try and create a + # DeviceEventTrigger using keyboard device, KEYBOARD_RELEASE + # event type, and triggers as the event.key. + trig_list = [KeyboardTrigger(triggers, on_press=False), ] + elif isinstance(triggers, Trigger): + # A single Trigger object was provided + trig_list = (triggers,) + else: + raise ValueError('The triggers kwarg could not be understood as a valid triggers input value.') + return trig_list + + +class TimeTrigger(Trigger): + """ + A TimeTrigger associates a delay from the provided start_time + parameter to when the classes triggered() method returns True. + start_time and delay can be sec.msec float, or a callable object + (that takes no parameters). 
+ """ + + def __init__(self, start_time, delay, repeat_count=0, trigger_function=lambda a, b, c: True, user_kwargs={}): + Trigger.io = ioHubConnection.getActiveConnection() + Trigger.__init__(self, trigger_function, user_kwargs, repeat_count) + + self._start_time = start_time + + if start_time is None or not callable(start_time): + def startTimeFunc(): + if self._start_time is None: + self._start_time = getTime() + return self._start_time + + self.startTime = startTimeFunc + else: + self.startTime = start_time + + self.delay = delay + if not callable(delay): + def delayFunc(): + return delay + + self.delay = delayFunc + + def triggered(self, **kwargs): + if Trigger.triggered(self) is False: + return False + + if self.startTime is None: + start_time = kwargs.get('start_time') + else: + start_time = self.startTime() + + if self.delay is None: + delay = kwargs.get('delay') + else: + delay = self.delay() + + ct = getTime() + if ct - start_time >= delay: + self._last_triggered_time = ct + self._last_triggered_event = ct + self.triggered_count += 1 + return True + return False + + def resetTrigger(self): + self.resetLastTriggeredInfo() + self.triggered_count = 0 + self._start_time = None + + +class DeviceEventTrigger(Trigger): + """ + A DeviceEventTrigger associates a set of conditions for a + DeviceEvent that must be met before the classes triggered() method + returns True. 
+ """ + _lastEventsByDevice = dict() + + def __init__(self, device, event_type, event_attribute_conditions={}, repeat_count=-1, + trigger_function=lambda a, b, c: True, user_kwargs={}): + Trigger.io = ioHubConnection.getActiveConnection() + Trigger.__init__(self, trigger_function, user_kwargs, repeat_count) + self.device = device + self.event_type = event_type + self.event_attribute_conditions = event_attribute_conditions + + def triggered(self, **kwargs): + if Trigger.triggered(self) is False: + return False + + events = self.device.getEvents() + if events is None: + events = [] + if self.device in self._lastEventsByDevice: + self._lastEventsByDevice[self.device].extend(events) + else: + self._lastEventsByDevice[self.device] = events + unhandledEvents = self._lastEventsByDevice.get(self.device, []) + + for event in unhandledEvents: + foundEvent = True + if event.type != self.event_type: + foundEvent = False + else: + for (attrname, conds) in self.event_attribute_conditions.items(): + if isinstance(conds, (list, tuple)) and getattr(event, attrname) in conds: + # event_value is a list or tuple of possible values + # that are OK + pass + elif getattr(event, attrname) is conds or getattr(event, attrname) == conds: + # event_value is a single value + pass + else: + foundEvent = False + + if foundEvent is True: + self._last_triggered_time = getTime() + self._last_triggered_event = event + self.triggered_count += 1 + return True + + return False + + @classmethod + def getEventBuffer(cls, copy=False): + if copy: + return dict(cls._lastEventsByDevice) + return cls._lastEventsByDevice + + @classmethod + def clearEventHistory(cls, returncopy=False): + eventbuffer = None + if returncopy: + eventbuffer = dict(cls._lastEventsByDevice) + cls._lastEventsByDevice.clear() + return eventbuffer + + def resetLastTriggeredInfo(self): + Trigger.resetLastTriggeredInfo(self) + if self.device in self._lastEventsByDevice: + del self._lastEventsByDevice[self.device] + + +class 
KeyboardTrigger(DeviceEventTrigger): + def __init__(self, key, on_press=False): + Trigger.io = ioHubConnection.getActiveConnection() + if on_press: + etype = EventConstants.KEYBOARD_PRESS + else: + etype = EventConstants.KEYBOARD_RELEASE + DeviceEventTrigger.__init__(self, self.io.devices.keyboard, event_type=etype, + event_attribute_conditions={'key': key}) diff --git a/psychopy/iohub/client/keyboard.py b/psychopy/iohub/client/keyboard.py index d89483140d6..26f22985048 100644 --- a/psychopy/iohub/client/keyboard.py +++ b/psychopy/iohub/client/keyboard.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/client/wintab.py b/psychopy/iohub/client/wintab.py index d39113cf490..448653c0cb6 100644 --- a/psychopy/iohub/client/wintab.py +++ b/psychopy/iohub/client/wintab.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/constants.py b/psychopy/iohub/constants.py index f4693fb5067..ee25692ff27 100644 --- a/psychopy/iohub/constants.py +++ b/psychopy/iohub/constants.py @@ -1,8 +1,7 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
from __future__ import division, absolute_import diff --git a/psychopy/iohub/datastore/__init__.py b/psychopy/iohub/datastore/__init__.py index be26f94ed50..bca0499b582 100644 --- a/psychopy/iohub/datastore/__init__.py +++ b/psychopy/iohub/datastore/__init__.py @@ -1,8 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/datastore/util.py b/psychopy/iohub/datastore/util.py index 84307761913..93ee54376a1 100644 --- a/psychopy/iohub/datastore/util.py +++ b/psychopy/iohub/datastore/util.py @@ -1,8 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- - # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/devices/__init__.py b/psychopy/iohub/devices/__init__.py index 103a06a271b..f76f4602620 100644 --- a/psychopy/iohub/devices/__init__.py +++ b/psychopy/iohub/devices/__init__.py @@ -1,8 +1,6 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/devices/computer.py b/psychopy/iohub/devices/computer.py index 836d99b3580..ee8e3e0d44b 100644 --- a/psychopy/iohub/devices/computer.py +++ b/psychopy/iohub/devices/computer.py @@ -1,8 +1,6 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import from __future__ import print_function diff --git a/psychopy/iohub/devices/deviceConfigValidation.py b/psychopy/iohub/devices/deviceConfigValidation.py index 5bbfc48b2e0..47723700af0 100644 --- a/psychopy/iohub/devices/deviceConfigValidation.py +++ b/psychopy/iohub/devices/deviceConfigValidation.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from past.builtins import basestring import socket diff --git a/psychopy/iohub/devices/display/__init__.py b/psychopy/iohub/devices/display/__init__.py index e27b390c162..c590d68ece1 100644 --- a/psychopy/iohub/devices/display/__init__.py +++ b/psychopy/iohub/devices/display/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
import sys @@ -231,8 +231,7 @@ def getBounds(self): def getCoordBounds(self): """Get the Display's left, top, right, and bottom border bounds, - specified in the coordinate space returned by - Display.getCoordinateType() + specified in the coordinate space returned by Display.getCoordinateType() Args: None diff --git a/psychopy/iohub/devices/eventfilters.py b/psychopy/iohub/devices/eventfilters.py index a9d60feea30..fd289f8722d 100644 --- a/psychopy/iohub/devices/eventfilters.py +++ b/psychopy/iohub/devices/eventfilters.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import from __future__ import print_function diff --git a/psychopy/iohub/devices/experiment/__init__.py b/psychopy/iohub/devices/experiment/__init__.py index 88fc0cca0dc..19dd5ef22ec 100644 --- a/psychopy/iohub/devices/experiment/__init__.py +++ b/psychopy/iohub/devices/experiment/__init__.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). - from past.types import basestring import numpy as N from .. import Device, DeviceEvent, Computer, Device diff --git a/psychopy/iohub/devices/eyetracker/__init__.py b/psychopy/iohub/devices/eyetracker/__init__.py index 1c9b8646536..be2da8b2e20 100644 --- a/psychopy/iohub/devices/eyetracker/__init__.py +++ b/psychopy/iohub/devices/eyetracker/__init__.py @@ -1,6 +1,6 @@ -"""ioHub Common Eye Tracker Interface""" +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. 
-# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from .. import Device, ioDeviceError from ...constants import DeviceConstants, EyeTrackerConstants diff --git a/psychopy/iohub/devices/eyetracker/eye_events.py b/psychopy/iohub/devices/eyetracker/eye_events.py index 356bb8a58d6..c53e848d240 100644 --- a/psychopy/iohub/devices/eyetracker/eye_events.py +++ b/psychopy/iohub/devices/eyetracker/eye_events.py @@ -1,16 +1,12 @@ -"""ioHub Common Eye Tracker Interface""" - # Part of the psychopy.iohub library. - # Copyright (C) 2012-2016 iSolver Software Solutions - # Distributed under the terms of the GNU General Public License (GPL). - +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). from .. import DeviceEvent from ...constants import EventConstants from . import EyeTrackerDevice import numpy as np -##################### Eye Tracker Sample Stream Types #################### -# - class EyeTrackerEvent(DeviceEvent): PARENT_DEVICE = EyeTrackerDevice diff --git a/psychopy/iohub/devices/eyetracker/hw/__init__.py b/psychopy/iohub/devices/eyetracker/hw/__init__.py index 9b15f906ff8..41998c05094 100644 --- a/psychopy/iohub/devices/eyetracker/hw/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/__init__.py @@ -1,5 +1,4 @@ -"""ioHub Common Eye Tracker Interface""" # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
\ No newline at end of file diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/__init__.py b/psychopy/iohub/devices/eyetracker/hw/gazepoint/__init__.py index 4f67d36a10c..c49f4ec8b7d 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/__init__.py @@ -1,7 +1,6 @@ -"""ioHub Common Eye Tracker Interface""" - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import absolute_import diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/__init__.py b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/__init__.py index 0ef2c3bbf04..c7393e1dd1a 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/__init__.py @@ -1,7 +1,6 @@ -"""ioHub Common Eye Tracker Interface""" - +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import absolute_import diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/default_eyetracker.yaml b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/default_eyetracker.yaml index 5ccac79c93d..ae3331a5cf7 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/default_eyetracker.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/default_eyetracker.yaml @@ -66,5 +66,5 @@ eyetracker.hw.gazepoint.gp3.EyeTracker: # device configuration, or you can complete any ones that are relevant for FYI # purposes only at this time. 
device_number: 0 - builder_hides: [ 'builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events', 'runtime_settings.track_eyes', 'calibration.use_builtin' ] + builder_hides: [ 'builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events', 'runtime_settings.track_eyes', 'calibration.use_builtin' ] diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py index dca5795bcd9..ea821e598d1 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/eyetracker.py @@ -1,9 +1,7 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). -# .. fileauthor:: Martin Guest -# .. fileauthor:: Sol Simpson from __future__ import division from ......errors import print2err, printExceptionDetailsToStdErr from ......constants import EventConstants, EyeTrackerConstants @@ -440,17 +438,20 @@ def runSetupProcedure(self): self._waitForAck('CALIBRATE_DELAY', timeout=2.0) self._gp3set('CALIBRATE_SHOW', STATE=1) - self._gp3set('CALIBRATE_START', STATE=1) - cal_result = self._waitForAck('CALIB_RESULT', timeout=30.0) + self._gp3set('CALIBRATE_START', STATE=1) - if cal_result: - #print2err("GP3 calibration done.") - #print2err("Closing GP3 calibration window....") + cal_result = self._waitForAck('CALIB_RESULT', timeout=30.0) + if cal_result: self._gp3set('CALIBRATE_SHOW', STATE=0) self._gp3get('CALIBRATE_RESULT_SUMMARY') - - cal_result['SUMMARY']=self._waitForAck('CALIBRATE_RESULT_SUMMARY') - #print2err("CAL_RESULT: ",cal_result) + del cal_result['type'] + del cal_result['ID'] + + cal_summary = self._waitForAck('CALIBRATE_RESULT_SUMMARY') + del cal_summary['type'] + del cal_summary['ID'] + 
cal_result['SUMMARY'] = cal_summary + return cal_result def _poll(self): diff --git a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/supported_config_settings.yaml b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/supported_config_settings.yaml index fd602ba7833..8cd7662fa72 100644 --- a/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/supported_config_settings.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/gazepoint/gp3/supported_config_settings.yaml @@ -1,9 +1,5 @@ eyetracker.hw.gazepoint.gp3.EyeTracker: - name: - IOHUB_STRING: - min_length: 1 - max_length: 32 - first_char_alpha: True + name: tracker enable: IOHUB_BOOL model_name: IOHUB_LIST: @@ -52,4 +48,4 @@ eyetracker.hw.gazepoint.gp3.EyeTracker: max: 2.5 device_number: 0 manufacturer_name: GazePoint - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events', 'runtime_settings.track_eyes', 'calibration.use_builtin'] + builder_hides: ['builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events', 'runtime_settings.track_eyes', 'calibration.use_builtin'] diff --git a/psychopy/iohub/devices/eyetracker/hw/mouse/__init__.py b/psychopy/iohub/devices/eyetracker/hw/mouse/__init__.py index 63468191ae5..8b86d234bf8 100644 --- a/psychopy/iohub/devices/eyetracker/hw/mouse/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/mouse/__init__.py @@ -1,6 +1,6 @@ -"""ioHub Common Eye Tracker Interface""" +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
from .eyetracker import EyeTracker from psychopy.iohub.devices.eyetracker import (MonocularEyeSampleEvent, FixationStartEvent, FixationEndEvent, diff --git a/psychopy/iohub/devices/eyetracker/hw/mouse/default_eyetracker.yaml b/psychopy/iohub/devices/eyetracker/hw/mouse/default_eyetracker.yaml index c8e9f823d6b..3cff6d62e51 100644 --- a/psychopy/iohub/devices/eyetracker/hw/mouse/default_eyetracker.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/mouse/default_eyetracker.yaml @@ -18,5 +18,5 @@ eyetracker.hw.mouse.EyeTracker: manufacturer_name: MouseGaze auto_report_events: False device_number: 0 - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events'] + builder_hides: ['builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events'] diff --git a/psychopy/iohub/devices/eyetracker/hw/mouse/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/mouse/eyetracker.py index 11d587d4a93..3bd3bc8487a 100644 --- a/psychopy/iohub/devices/eyetracker/hw/mouse/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/mouse/eyetracker.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
from psychopy.iohub.errors import print2err, printExceptionDetailsToStdErr from psychopy.iohub.constants import EyeTrackerConstants, EventConstants diff --git a/psychopy/iohub/devices/eyetracker/hw/mouse/supported_config_settings.yaml b/psychopy/iohub/devices/eyetracker/hw/mouse/supported_config_settings.yaml index ba98370d2ce..1e2d41836af 100644 --- a/psychopy/iohub/devices/eyetracker/hw/mouse/supported_config_settings.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/mouse/supported_config_settings.yaml @@ -43,4 +43,4 @@ eyetracker.hw.mouse.EyeTracker: track_eyes: RIGHT_EYE device_number: 0 manufacturer_name: MouseGaze - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events'] + builder_hides: ['builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events'] diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/__init__.py b/psychopy/iohub/devices/eyetracker/hw/sr_research/__init__.py index 1f46e00f341..41998c05094 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/__init__.py @@ -1,4 +1,4 @@ -"""ioHub Common Eye Tracker Interface for EyeLink(C) Systems""" - # Part of the psychopy.iohub library. - # Copyright (C) 2012-2016 iSolver Software Solutions - # Distributed under the terms of the GNU General Public License (GPL). \ No newline at end of file +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). 
\ No newline at end of file diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/__init__.py b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/__init__.py index 41c98d4d3b0..50dde39f6ad 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/__init__.py @@ -1,6 +1,6 @@ -"""ioHub Common Eye Tracker Interface for EyeLink(C) Systems""" +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from .eyetracker import (EyeTracker, MonocularEyeSampleEvent, BinocularEyeSampleEvent, FixationStartEvent, diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/default_eyetracker.yaml b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/default_eyetracker.yaml index b3c6381648c..0f5c8f9ac7e 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/default_eyetracker.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/default_eyetracker.yaml @@ -219,4 +219,4 @@ eyetracker.hw.sr_research.eyelink.EyeTracker: # device_number is not used by this device type. 
# device_number: 0 - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events'] \ No newline at end of file + builder_hides: ['builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events'] \ No newline at end of file diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py index 3edb97e2fd9..4200c380e08 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyeLinkCoreGraphicsIOHubPsychopy.py @@ -1,9 +1,6 @@ -""" -ioHub Common Eye Tracker Interface for EyeLink(C) Systems. -EyeLink(C) calibration graphics implemented using PsychoPy. -""" -# Part of the PsychoPy.iohub library -# Copyright (C) 2012-2016 iSolver Software Solutions +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
import numpy as np from PIL import Image, ImageOps @@ -506,6 +503,8 @@ def _handleEvent(self, event): self.state = 'validation' elif char == 'a': pylink_key = ord(char) + elif char == 'o': + pylink_key = ord(char) elif char == 'pageup': pylink_key = pylink.PAGE_UP elif char == 'pagedown': diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py index a4fb8e30446..647b2eba88a 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/eyetracker.py @@ -1,6 +1,6 @@ -"""ioHub Common Eye Tracker Interface for EyeLink(C) Systems""" -# Part of the PsychoPy.iohub library -# Copyright (C) 2012-2016 iSolver Software Solutions +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). import os import numpy as np @@ -400,9 +400,6 @@ def runSetupProcedure(self): * ESC can be pressed at any time to exit the current state of the setup procedure and return to the initial blank screen state. * O = Exit the runSetupProcedure method and continue with the experiment. """ -# if starting_state != EyeTrackerConstants.DEFAULT_SETUP_PROCEDURE: -# printExceptionDetailsToStdErr() - try: from . 
import eyeLinkCoreGraphicsIOHubPsychopy EyeLinkCoreGraphicsIOHubPsychopy = eyeLinkCoreGraphicsIOHubPsychopy.EyeLinkCoreGraphicsIOHubPsychopy @@ -415,8 +412,7 @@ def runSetupProcedure(self): targetOuterDiameter = circle_attributes.get('outer_diameter') targetInnerDiameter = circle_attributes.get('inner_diameter') - genv = EyeLinkCoreGraphicsIOHubPsychopy( - self, + genv = EyeLinkCoreGraphicsIOHubPsychopy(self, targetForegroundColor=targetForegroundColor, targetBackgroundColor=targetBackgroundColor, screenColor=screenColor, @@ -424,13 +420,38 @@ def runSetupProcedure(self): targetInnerDiameter=targetInnerDiameter) pylink.openGraphicsEx(genv) + self._eyelink.doTrackerSetup() + + m = self._eyelink.getCalibrationMessage() + r = self._eyelink.getCalibrationResult() + + # from pylink docs, getCalibrationResult should return: + # + # NO_REPLY if calibration not completed yet. + # OK_RESULT(0) if success. + # ABORT_REPLY(27) if 'ESC' key aborted calibration. + # -1 if calibration failed. + # 1 if poor calibration or excessive validation error. + # + # but it returns 1000. ?? + # + # getCalibrationResult returns "calibration_result: 0", where + # 0 == OK_RESULT == successful calibration. + # TODO: Test if eyelink returns different calibration_result if calibration fails. + reply = dict(message=m, result=r) + # reply is returning: + # {'message': 'calibration_result: 0', 'result': 1000} + # on a successful calibration. + # TODO: Parse into more meaningful message if possible. 
+ genv._unregisterEventMonitors() genv.clearAllEventBuffers() genv.window.close() del genv.window del genv - return EyeTrackerConstants.EYETRACKER_OK + + return reply except Exception as e: printExceptionDetailsToStdErr() @@ -1455,8 +1476,7 @@ def _addCommandFunctions(self): self._COMMAND_TO_FUNCTION['doDriftCorrect'] = _doDriftCorrect self._COMMAND_TO_FUNCTION['eyeAvailable'] = _eyeAvailable self._COMMAND_TO_FUNCTION['enableDummyOpen'] = _dummyOpen - self._COMMAND_TO_FUNCTION[ - 'getLastCalibrationInfo'] = _getCalibrationMessage + self._COMMAND_TO_FUNCTION['getLastCalibrationInfo'] = _getCalibrationMessage self._COMMAND_TO_FUNCTION['applyDriftCorrect'] = _applyDriftCorrect self._COMMAND_TO_FUNCTION['setIPAddress'] = _setIPAddress self._COMMAND_TO_FUNCTION['setLockEye'] = _setLockEye @@ -1580,7 +1600,7 @@ def _getSamplingRate(self): def _getTrackerMode(*args, **kwargs): try: - r = pylink.getEyeLink().getTrackerMode() + r = pylink.getEYELINK().getTrackerMode() return _EYELINK_HOST_MODES[r] except Exception as e: printExceptionDetailsToStdErr() @@ -1590,7 +1610,7 @@ def _doDriftCorrect(*args, **kwargs): try: if len(args) == 4: x, y, draw, allow_setup = args - r = pylink.getEyeLink().doDriftCorrect(x, y, draw, allow_setup) + r = pylink.getEYELINK().doDriftCorrect(x, y, draw, allow_setup) return r else: print2err('doDriftCorrect requires 4 parameters, received: ', args) @@ -1601,7 +1621,7 @@ def _doDriftCorrect(*args, **kwargs): def _applyDriftCorrect(): try: - r = pylink.getEyeLink().applyDriftCorrect() + r = pylink.getEYELINK().applyDriftCorrect() if r == 0: return True else: @@ -1612,7 +1632,7 @@ def _applyDriftCorrect(): def _eyeAvailable(*args, **kwargs): try: - r = pylink.getEyeLink().eyeAvailable() + r = pylink.getEYELINK().eyeAvailable() if r == 0: return EyeTrackerConstants.getName(EyeTrackerConstants.LEFT_EYE) elif r == 1: @@ -1627,7 +1647,7 @@ def _eyeAvailable(*args, **kwargs): def _dummyOpen(*args, **kwargs): try: - r = pylink.getEyeLink().dummy_open() + r 
= pylink.getEYELINK().dummy_open() return r except Exception as e: printExceptionDetailsToStdErr() @@ -1635,15 +1655,13 @@ def _dummyOpen(*args, **kwargs): def _getCalibrationMessage(*args, **kwargs): try: - m = pylink.getEyeLink().getCalibrationMessage() - r = pylink.getEyeLink().getCalibrationResult() + m = pylink.getEYELINK().getCalibrationMessage() + r = pylink.getEYELINK().getCalibrationResult() if r in _eyeLinkCalibrationResultDict: r = _eyeLinkCalibrationResultDict[r] else: r = 'NO_REPLY' - rString = 'Last Calibration Message:\n{0}\n\nLastCalibrationResult:\n{1}'.format( - m, r) - return rString + return dict(message=m, result=r) except Exception as e: printExceptionDetailsToStdErr() @@ -1652,7 +1670,7 @@ def _setIPAddress(*args, **kwargs): try: if len(args) == 1: ipString = args[0] - r = pylink.getEyeLink().setAddress(ipString) + r = pylink.getEYELINK().setAddress(ipString) if r == 0: return True return [ @@ -1667,7 +1685,7 @@ def _setLockEye(*args, **kwargs): try: if len(args) == 1: enable = args[0] - r = pylink.getEyeLink().sendCommand('lock_eye_after_calibration %d' % (enable)) + r = pylink.getEYELINK().sendCommand('lock_eye_after_calibration %d' % (enable)) return r return ['EYE_TRACKER_ERROR', 'setLockEye', 'One argument is required, bool type.'] diff --git a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/supported_config_settings.yaml b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/supported_config_settings.yaml index d4850951c18..31a3c4149b5 100644 --- a/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/supported_config_settings.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/sr_research/eyelink/supported_config_settings.yaml @@ -1,9 +1,5 @@ eyetracker.hw.sr_research.eyelink.EyeTracker: - name: - IOHUB_STRING: - min_length: 1 - max_length: 32 - first_char_alpha: True + name: tracker enable: IOHUB_BOOL save_events: IOHUB_BOOL stream_events: IOHUB_BOOL @@ -105,6 +101,6 @@ eyetracker.hw.sr_research.eyelink.EyeTracker: # 
manufacturer_name: Used by Builder as the displayed name in the eye tracker selection dropdown. # manufacturer_name: SR Research Ltd - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'device_timer', 'auto_report_events'] + builder_hides: ['builder_hides', 'enable', 'device_number', 'device_timer', 'auto_report_events'] diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/__init__.py b/psychopy/iohub/devices/eyetracker/hw/tobii/__init__.py index c4e85f525eb..4e7fa4d5ca3 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/__init__.py +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/__init__.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). -"""ioHub Common Eye Tracker Interface for Tobii (C) Eye Tracking System""" from __future__ import absolute_import diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml b/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml index 4987cf404b3..dffbfc07d2e 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/default_eyetracker.yaml @@ -27,7 +27,7 @@ eyetracker.hw.tobii.EyeTracker: # The model name of the Tobii device that you wish to connect to can be specified here, # and only Tobii systems matching that model name will be considered as possible candidates for connection. # If you only have one Tobii system connected to the computer, this field can just be left empty. - model_name: Any Pro Model + model_name: # The serial number of the Tobii device that you wish to connect to can be specified here, # and only the Tobii system matching that serial number will be connected to, if found. 
@@ -36,11 +36,6 @@ eyetracker.hw.tobii.EyeTracker: serial_number: calibration: - # Should the PsychoPy Window created by the PsychoPy Process be minimized - # before displaying the Calibration Window created by the ioHub Process. - # - minimize_psychopy_win: False - # The Tobii ioHub Common Eye Tracker Interface currently support # a 3, 5 and 9 point calibration mode. # THREE_POINTS,FIVE_POINTS,NINE_POINTS @@ -150,7 +145,8 @@ eyetracker.hw.tobii.EyeTracker: runtime_settings: # The supported sampling rates for Tobii are model dependent. - # Using a default of 60 Hz, with the assumption it is the most common. + # If the sampling rate specified here is not supported by the model being used, + # the Tobii device will continue to use its current sampling rate. sampling_rate: 60 # Tobii supports BINOCULAR tracking mode only. @@ -165,4 +161,4 @@ eyetracker.hw.tobii.EyeTracker: device_number: 0 - builder_hides: ['builder_hides', 'enable', 'name', 'calibration.target_positions', 'device_number', 'auto_report_events'] \ No newline at end of file + builder_hides: ['builder_hides', 'enable', 'calibration.target_positions', 'device_number', 'auto_report_events'] \ No newline at end of file diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py b/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py index 5fd334b5d70..7b678f423fa 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/eyetracker.py @@ -1,9 +1,7 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
- -"""ioHub Common Eye Tracker Interface for Tobii (C) Eye Tracking System""" from __future__ import absolute_import import math from .....constants import EventConstants, EyeTrackerConstants @@ -187,11 +185,9 @@ def runSetupProcedure(self): from .tobiiCalibrationGraphics import TobiiPsychopyCalibrationGraphics calibration_properties = self.getConfiguration().get('calibration') - screenColor = calibration_properties.get( - 'screen_background_color') # [r,g,b] of screen + screenColor = calibration_properties.get('screen_background_color') # [r,g,b] of screen - genv = TobiiPsychopyCalibrationGraphics( - self, screenColor=screenColor) + genv = TobiiPsychopyCalibrationGraphics(self, screenColor=screenColor) calibrationOK = genv.runCalibration() diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml b/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml index 1620aca88f2..aa3bf538923 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/supported_config_settings.yaml @@ -1,15 +1,10 @@ eyetracker.hw.tobii.EyeTracker: - name: - IOHUB_STRING: - min_length: 1 - max_length: 32 - first_char_alpha: True + name: tracker enable: IOHUB_BOOL model_name: - IOHUB_LIST: - valid_values: [Any Pro Model, Nano, Fusion, Spectrum, T120, X120, TX300, X2, X3] - min_length: 1 - max_length: 1 + IOHUB_STRING: + min_length: 0 + max_length: 64 serial_number: IOHUB_STRING: min_length: 0 @@ -24,13 +19,11 @@ eyetracker.hw.tobii.EyeTracker: monitor_event_types: [BinocularEyeSampleEvent,] runtime_settings: sampling_rate: - IOHUB_LIST: - valid_values: [30, 60, 120, 250, 300, 1200] - min_length: 1 - max_length: 1 + IOHUB_INT: + min: 30 + max: 1200 track_eyes: [BINOCULAR,] calibration: - minimize_psychopy_win: IOHUB_BOOL # The Tobii ioHub Common Eye Tracker Interface currently support # a 3, 5 and 9 point calibration mode. 
# THREE_POINTS,FIVE_POINTS,NINE_POINTS @@ -86,4 +79,4 @@ eyetracker.hw.tobii.EyeTracker: contract_only: IOHUB_BOOL device_number: 0 manufacturer_name: Tobii Technology - builder_hides: ['builder_hides', 'enable', 'name', 'device_number', 'calibration.target_positions', 'auto_report_events'] \ No newline at end of file + builder_hides: ['builder_hides', 'enable', 'device_number', 'calibration.target_positions', 'auto_report_events'] \ No newline at end of file diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py b/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py index 25fc05f8a27..4e84df20279 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiCalibrationGraphics.py @@ -1,10 +1,6 @@ -""" -ioHub Common Eye Tracker Interface for Tobii (C) Eye Tracking System. -Calibration graphics implemented using PsychoPy. -""" # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
import psychopy @@ -232,8 +228,8 @@ def _createStim(self): self.marker_heights = (-sh / 2.0 * .7, -sh / 2.0 * .75, -sh / 2.0 * .8, -sh / 2.0 * .7, -sh / 2.0 * .75, -sh / 2.0 * .8) - bar_vertices = [-hbox_bar_length / 2, -hbox_bar_height / 2], [hbox_bar_length / 2, -hbox_bar_height / - 2], [hbox_bar_length / 2, hbox_bar_height / 2], [-hbox_bar_length / 2, hbox_bar_height / 2] + bar_vertices = ([-hbox_bar_length / 2, -hbox_bar_height / 2], [hbox_bar_length / 2, -hbox_bar_height / 2], + [hbox_bar_length / 2, hbox_bar_height / 2], [-hbox_bar_length / 2, hbox_bar_height / 2]) self.feedback_resources = OrderedDict() @@ -342,12 +338,9 @@ def runCalibration(self): if not continue_calibration: return False - auto_pace = self._eyetrackerinterface.getConfiguration()['calibration'][ - 'auto_pace'] - pacing_speed = self._eyetrackerinterface.getConfiguration()['calibration'][ - 'pacing_speed'] - randomize_points = self._eyetrackerinterface.getConfiguration()['calibration'][ - 'randomize'] + auto_pace = self._eyetrackerinterface.getConfiguration()['calibration']['auto_pace'] + pacing_speed = self._eyetrackerinterface.getConfiguration()['calibration']['pacing_speed'] + randomize_points = self._eyetrackerinterface.getConfiguration()['calibration']['randomize'] cal_target_list = self.CALIBRATION_POINT_LIST[1:-1] if randomize_points is True: @@ -414,8 +407,9 @@ def waitingForNextTargetTime(): self.clearCalibrationWindow() self.clearAllEventBuffers() + calibration_result = None if _quit: - return False + return calibration_result self._lastCalibrationOK = False if calibration: @@ -426,20 +420,19 @@ def waitingForNextTargetTime(): self._lastCalibrationOK = False calibration.leave_calibration_mode() calibration = None - - - + if self._lastCalibrationOK is False: instuction_text = 'Calibration Failed. 
Options: SPACE: Re-run Calibration; ESCAPE: Exit Setup' continue_method = self.showSystemSetupMessageScreen( instuction_text, True, msg_types=['SPACE_KEY_ACTION', 'QUIT']) if continue_method is False: return self.runCalibration() - return False + return calibration_result instuction_text = "Calibration Passed. PRESS 'SPACE' KEY TO CONTINUE." self.showSystemSetupMessageScreen(instuction_text, True, msg_types=['SPACE_KEY_ACTION']) - return True + + return calibration_result def clearCalibrationWindow(self): self.window.flip(clearBuffer=True) diff --git a/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiwrapper.py b/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiwrapper.py index 35cff850298..4c1c4ad5bc5 100644 --- a/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiwrapper.py +++ b/psychopy/iohub/devices/eyetracker/hw/tobii/tobiiwrapper.py @@ -1,4 +1,7 @@ -"""ioHub Common Eye Tracker Interface for Tobii (C) Eye Tracking System.""" +# -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). from __future__ import print_function # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. diff --git a/psychopy/iohub/devices/keyboard/__init__.py b/psychopy/iohub/devices/keyboard/__init__.py index d37083aba1a..776a133b2a6 100644 --- a/psychopy/iohub/devices/keyboard/__init__.py +++ b/psychopy/iohub/devices/keyboard/__init__.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
global Keyboard diff --git a/psychopy/iohub/devices/keyboard/darwin.py b/psychopy/iohub/devices/keyboard/darwin.py index 36e45a96a3f..a927b94a971 100644 --- a/psychopy/iohub/devices/keyboard/darwin.py +++ b/psychopy/iohub/devices/keyboard/darwin.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from copy import copy import Quartz as Qz diff --git a/psychopy/iohub/devices/keyboard/darwinkey.py b/psychopy/iohub/devices/keyboard/darwinkey.py index eab186c0762..3b68b2099e2 100644 --- a/psychopy/iohub/devices/keyboard/darwinkey.py +++ b/psychopy/iohub/devices/keyboard/darwinkey.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). # /System/Library/Frameworks/Carbon.framework/Versions/A/Frameworks/ diff --git a/psychopy/iohub/devices/keyboard/linux2.py b/psychopy/iohub/devices/keyboard/linux2.py index f04ad080c04..17d883ab248 100644 --- a/psychopy/iohub/devices/keyboard/linux2.py +++ b/psychopy/iohub/devices/keyboard/linux2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from . import ioHubKeyboardDevice diff --git a/psychopy/iohub/devices/keyboard/win32.py b/psychopy/iohub/devices/keyboard/win32.py index a95f2d041df..e1306574d70 100644 --- a/psychopy/iohub/devices/keyboard/win32.py +++ b/psychopy/iohub/devices/keyboard/win32.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. 
-# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). try: diff --git a/psychopy/iohub/devices/mouse/__init__.py b/psychopy/iohub/devices/mouse/__init__.py index bfbcf7d2b90..0c8be0f4fff 100644 --- a/psychopy/iohub/devices/mouse/__init__.py +++ b/psychopy/iohub/devices/mouse/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, print_function, absolute_import diff --git a/psychopy/iohub/devices/mouse/darwin.py b/psychopy/iohub/devices/mouse/darwin.py index 2028a6d2047..59dedf37af6 100644 --- a/psychopy/iohub/devices/mouse/darwin.py +++ b/psychopy/iohub/devices/mouse/darwin.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, print_function, absolute_import diff --git a/psychopy/iohub/devices/mouse/linux2.py b/psychopy/iohub/devices/mouse/linux2.py index 9584939e807..f4adca7cd80 100644 --- a/psychopy/iohub/devices/mouse/linux2.py +++ b/psychopy/iohub/devices/mouse/linux2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
from __future__ import division, print_function, absolute_import diff --git a/psychopy/iohub/devices/mouse/win32.py b/psychopy/iohub/devices/mouse/win32.py index 79ec13cf436..0ee0eb07b41 100644 --- a/psychopy/iohub/devices/mouse/win32.py +++ b/psychopy/iohub/devices/mouse/win32.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, print_function, absolute_import diff --git a/psychopy/iohub/devices/serial/__init__.py b/psychopy/iohub/devices/serial/__init__.py index 3550fd8202d..b5fb7b7a405 100644 --- a/psychopy/iohub/devices/serial/__init__.py +++ b/psychopy/iohub/devices/serial/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). import serial import sys diff --git a/psychopy/iohub/devices/wintab/__init__.py b/psychopy/iohub/devices/wintab/__init__.py index d5df757f1eb..9e5283635bd 100644 --- a/psychopy/iohub/devices/wintab/__init__.py +++ b/psychopy/iohub/devices/wintab/__init__.py @@ -1,13 +1,8 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). -# -# TODO List -# -# 2) Check for missing serial numbers in PACKET evt stream. 
-# _is_epydoc = False # Pen digitizers /tablets that support Wintab API diff --git a/psychopy/iohub/devices/wintab/win32.py b/psychopy/iohub/devices/wintab/win32.py index 2d9e1ccf524..dde8087f317 100644 --- a/psychopy/iohub/devices/wintab/win32.py +++ b/psychopy/iohub/devices/wintab/win32.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). # Initial file based on pyglet.libs.win32 diff --git a/psychopy/iohub/errors.py b/psychopy/iohub/errors.py index e62c44ee251..a9a339ff9a1 100644 --- a/psychopy/iohub/errors.py +++ b/psychopy/iohub/errors.py @@ -1,6 +1,6 @@ -# coding=utf-8 +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import, print_function diff --git a/psychopy/iohub/net.py b/psychopy/iohub/net.py index 6080e3459ce..47af4b12ac0 100644 --- a/psychopy/iohub/net.py +++ b/psychopy/iohub/net.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import diff --git a/psychopy/iohub/server.py b/psychopy/iohub/server.py index 9b3bd12ac55..59580e89159 100644 --- a/psychopy/iohub/server.py +++ b/psychopy/iohub/server.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
from __future__ import division, absolute_import diff --git a/psychopy/iohub/start_iohub_process.py b/psychopy/iohub/start_iohub_process.py index a6c4a091fc1..ead929f3184 100644 --- a/psychopy/iohub/start_iohub_process.py +++ b/psychopy/iohub/start_iohub_process.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). from __future__ import division, absolute_import import json diff --git a/psychopy/iohub/util/__init__.py b/psychopy/iohub/util/__init__.py index 8d938e74428..204c3727d57 100644 --- a/psychopy/iohub/util/__init__.py +++ b/psychopy/iohub/util/__init__.py @@ -1,5 +1,6 @@ +# -*- coding: utf-8 -*- # Part of the psychopy.iohub library. -# Copyright (C) 2012-2016 iSolver Software Solutions +# Copyright (C) 2012-2021 iSolver Software Solutions # Distributed under the terms of the GNU General Public License (GPL). 
from __future__ import division @@ -144,7 +145,6 @@ def getDevicePaths(device_name=""): iohub_device_path = module_directory(import_device) if device_name: iohub_device_path = os.path.join(iohub_device_path, device_name.replace('.', os.path.sep)) - scs_yaml_paths = [] for root, dirs, files in os.walk(iohub_device_path): device_folder = None @@ -182,6 +182,8 @@ def getDeviceDefaultConfig(device_name, builder_hides=True): 'save_events': True, 'stream_events': True} """ + if device_name.endswith(".EyeTracker"): + device_name = device_name[:-11] device_paths = getDevicePaths(device_name) device_configs = [] for dpath, dconf in device_paths: @@ -217,23 +219,30 @@ def getDeviceDefaultConfig(device_name, builder_hides=True): _iohub2builderInputType = dict(IOHUB_STRING='single', IOHUB_BOOL='bool', IOHUB_FLOAT='single', IOHUB_INT='single', IOHUB_LIST=('choice','multi'), IOHUB_COLOR='color', IOHUB_IP_ADDRESS_V4='single') -def getDeviceNames(device_name="eyetracker.hw"): +def getDeviceNames(device_name="eyetracker.hw", get_paths=True): """ - Return a list of iohub eye tracker device names, as would be used as keys to launchHubServer. + Return a list of iohub eye tracker device names, as would be used as keys to launchHubServer. If get_paths is True, + return both the device manufacturer name (for display in Builder) and the iohub device name. 
Example: eyetrackers = getDeviceNames() print(eyetrackers) Output: - ['eyetracker.hw.gazepoint.gp3', 'eyetracker.hw.sr_research.eyelink', 'eyetracker.hw.tobii'] + [('GazePoint', 'eyetracker.hw.gazepoint.gp3.EyeTracker'), + ('MouseGaze', 'eyetracker.hw.mouse.EyeTracker'), + ('SR Research Ltd', 'eyetracker.hw.sr_research.eyelink.EyeTracker'), + ('Tobii Technology', 'eyetracker.hw.tobii.EyeTracker')] """ names = [] dconfigs = getDeviceDefaultConfig(device_name) for dcfg in dconfigs: - d_name = tuple(dcfg.keys())[0] - d_name = d_name[:d_name.rfind('.')] - names.append(d_name) + d_path = tuple(dcfg.keys())[0] + d_config = tuple(dcfg.values())[0] + if get_paths is False: + names.append(d_path) + else: + names.append((d_config.get('manufacturer_name'), d_path)) return names def getDeviceFile(device_name, file_name): @@ -244,6 +253,8 @@ def getDeviceFile(device_name, file_name): :param: file_name: name of device yaml file to load :return: dict """ + if device_name.endswith(".EyeTracker"): + device_name = device_name[:-11] device_paths = getDevicePaths(device_name) device_sconfigs = [] for dpath, _ in device_paths: @@ -354,7 +365,7 @@ def settings2Params(parent_list, settings): slabel = slabel+k.replace("_", " ").title() if isinstance(sconfig_data, dict): - iohub_type, type_constraints =list(sconfig_data.items())[0] + iohub_type, type_constraints = list(sconfig_data.items())[0] builderValType = _iohub2builderValType[iohub_type] builderInputType = _iohub2builderInputType[iohub_type] valid_values = None @@ -364,6 +375,7 @@ def settings2Params(parent_list, settings): builderInputType = builderInputType[0] else: builderInputType = builderInputType[1] + builderValType = type(valid_values[0]) if valid_values: nv = dict(valType=builderValType, inputType=builderInputType, defaultVal=v, allowedVals=valid_values, hint=shint, label=slabel) @@ -371,7 +383,7 @@ def settings2Params(parent_list, settings): nv = dict(valType=builderValType, inputType=builderInputType, defaultVal=v, 
hint=shint, label=slabel) elif isinstance(sconfig_data, list): - nv = dict(valType='list', inputType='static', defaultVal=v, hint=shint, label=slabel) + nv = dict(valType=type(v), inputType='static', defaultVal=v, hint=shint, label=slabel) elif sconfig_data in _iohub2builderValType.keys(): nv = dict(valType=_iohub2builderValType[sconfig_data], inputType=_iohub2builderInputType[sconfig_data], defaultVal=v, diff --git a/psychopy/iohub/util/visualangle.py b/psychopy/iohub/util/visualangle.py index d728c951af4..6567763a0bc 100644 --- a/psychopy/iohub/util/visualangle.py +++ b/psychopy/iohub/util/visualangle.py @@ -1,4 +1,7 @@ # -*- coding: utf-8 -*- +# Part of the psychopy.iohub library. +# Copyright (C) 2012-2021 iSolver Software Solutions +# Distributed under the terms of the GNU General Public License (GPL). from __future__ import division from builtins import object """