From 538bfb103028678e6bab77d688051d4d080e51ce Mon Sep 17 00:00:00 2001 From: Scott Huberty Date: Mon, 18 Mar 2024 14:56:42 -0700 Subject: [PATCH 1/8] Add author name Hoeks to ignore_words --- ignore_words.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/ignore_words.txt b/ignore_words.txt index acf20c0a58a..db2324637d5 100644 --- a/ignore_words.txt +++ b/ignore_words.txt @@ -38,3 +38,4 @@ pres aas vor connec +Hoeks From 8d5d749968a8acfafc56e36d7e9ac83ca146da1f Mon Sep 17 00:00:00 2001 From: Scott Huberty Date: Mon, 18 Mar 2024 15:11:11 -0700 Subject: [PATCH 2/8] ENH: add pupil deconvolution method, ported from pyeparse This basically ports the code from pyeparse, with slight updates to work with mne and match mne and modern python conventions. --- doc/api/preprocessing.rst | 3 + doc/references.bib | 23 ++ ignore_words.txt | 2 +- mne/preprocessing/eyetracking/__init__.py | 2 +- .../eyetracking/_pupillometry.py | 269 +++++++++++++++++- .../eyetracking/tests/test_pupillometry.py | 59 +++- mne/utils/docs.py | 10 + 7 files changed, 361 insertions(+), 7 deletions(-) diff --git a/doc/api/preprocessing.rst b/doc/api/preprocessing.rst index 1e0e9e56079..b7dc7fabc85 100644 --- a/doc/api/preprocessing.rst +++ b/doc/api/preprocessing.rst @@ -166,6 +166,9 @@ Projections: convert_units get_screen_visual_angle interpolate_blinks + deconvolve + pupil_zscores + pupil_kernel EEG referencing: diff --git a/doc/references.bib b/doc/references.bib index 7a992b2c1fa..43438ddc0dc 100644 --- a/doc/references.bib +++ b/doc/references.bib @@ -823,6 +823,17 @@ @article{HippEtAl2012 year = {2012} } +@article{Hoeks1993, + author = {Hoeks, Bert and Levelt, Willem J M}, + doi = {10.3758/BF03204445}, + journal = {Behavior Research Methods, Instruments, & Computers}, + number = {1}, + pages = {16--26}, + title = {Pupillary dilation as a measure of attention: a quantitative system analysis}, + volume = {25}, + year = {1993} +} + @article{HoldgrafEtAl2016, author = {Holdgraf, Christopher R. and {de Heer}, Wendy and Pasley, Brian and Rieger, Jochem and Crone, Nathan and Lin, Jack J. and Knight, Robert T. 
and Theunissen, Frédéric E.}, doi = {10.1038/ncomms13654}, @@ -2153,6 +2164,18 @@ @inproceedings{StrohmeierEtAl2015 pages = {21--24} } +@article{Wierda2012, + title = "Pupil dilation deconvolution reveals the dynamics of attention + at high temporal resolution", + author = "Wierda, Stefan M and van Rijn, Hedderik and Taatgen, Niels A and + Martens, Sander", + journal = "Proceedings of the National Academy of Sciences", + volume = 109, + number = 22, + pages = "8456--8460", + year = 2012, +} + @misc{WikipediaSI, author = "{Wikipedia contributors}", title = "International System of Units — {Wikipedia}{,} The Free Encyclopedia", diff --git a/ignore_words.txt b/ignore_words.txt index db2324637d5..f846f4ce4c0 100644 --- a/ignore_words.txt +++ b/ignore_words.txt @@ -38,4 +38,4 @@ pres aas vor connec -Hoeks +hoeks diff --git a/mne/preprocessing/eyetracking/__init__.py b/mne/preprocessing/eyetracking/__init__.py index efab0fb079d..98017381131 100644 --- a/mne/preprocessing/eyetracking/__init__.py +++ b/mne/preprocessing/eyetracking/__init__.py @@ -7,5 +7,5 @@ from .eyetracking import set_channel_types_eyetrack, convert_units from .calibration import Calibration, read_eyelink_calibration -from ._pupillometry import interpolate_blinks +from ._pupillometry import interpolate_blinks, deconvolve, pupil_kernel, pupil_zscores from .utils import get_screen_visual_angle diff --git a/mne/preprocessing/eyetracking/_pupillometry.py b/mne/preprocessing/eyetracking/_pupillometry.py index 8da124b2e1f..49b681a6bfa 100644 --- a/mne/preprocessing/eyetracking/_pupillometry.py +++ b/mne/preprocessing/eyetracking/_pupillometry.py @@ -5,9 +5,20 @@ import numpy as np +from mne import BaseEpochs +from mne._fiff.pick import _picks_to_idx +from mne.parallel import parallel_func + from ..._fiff.constants import FIFF from ...io import BaseRaw -from ...utils import _check_preload, _validate_type, logger, warn +from ...utils import ( + _check_option, + _check_preload, + _validate_type, + fill_doc, + logger, + warn, +) def interpolate_blinks(raw, buffer=0.05, match="BAD_blink", interpolate_gaze=False): @@ -115,3 +126,259 @@ def _interpolate_blinks(raw, buffer, blink_annots, interpolate_gaze): ) else: warn("No channels were interpolated.") + + +@fill_doc +def pupil_zscores(epochs, baseline=(None, 0)): + """Get normalized pupil data. + + This function normalizes pupil responses within each epoch by subtracting + the mean pupil response during a specified baseline period and then dividing + by the standard deviation of all data (across time). This may help to compare + pupil responses across epochs or participants. + + Parameters + ---------- + epochs : instance of Epochs + The epochs with pupil channels. + %(pupil_baseline)s + + Returns + ------- + pupil_data : array + An array of pupil size data, shape (``n_epochs``, ``n_channels``, ``n_times``). 
+ """ + # Code ported from https://github.com/pyeparse/pyeparse + _check_preload(epochs, "Z-score normalization") + _validate_type(epochs, BaseEpochs, "epochs") + _validate_type(baseline, (tuple, list, np.ndarray), "baseline") + + pupil_picks = _picks_to_idx(epochs.info, "pupil", allow_empty=True) + if not pupil_picks.any(): + raise RuntimeError("no pupil data") + if len(baseline) != 2: + raise RuntimeError("baseline must be a 2-element list") + baseline = np.array(baseline) + if baseline[0] is None: + baseline[0] = epochs.times[0] + if baseline[1] is None: + baseline[1] = epochs.times[-1] + baseline = epochs.time_as_index(baseline) + zs = epochs.get_data(pupil_picks) + std = np.nanstd(zs.flat) + bl = np.nanmean(zs[..., baseline[0] : baseline[1] + 1], axis=-1) + zs -= bl[:, np.newaxis, :] + zs /= std + return zs + + +@fill_doc +def deconvolve( + epochs, + spacing=0.1, + baseline=(None, 0), + bounds=None, + max_iter=500, + kernel=None, + n_jobs=1, + acc=1e-6, + method="minimize", + reg=100, +): + """Deconvolve pupillary responses. + + Parameters + ---------- + epochs : instance of Epochs + The epochs with pupil data to deconvolve. + spacing : float | array + Spacing of time points to use for deconvolution. Can also + be an array to directly specify time points to use. + %(pupil_baseline)s + This is passed to :func:`~mne.preprocessing.eyetracking.pupil_zscores`. + bounds : array of shape (2,) | None + Limits for deconvolution values. Can be, e.g. ``(0, np.inf)`` to + constrain to positive values. If ``None``, no bounds are used. Default is + ``None``. + max_iter : int + Maximum number of iterations of minimization algorithm. Default is ``500``. + kernel : array | None + Kernel to assume when doing deconvolution. If ``None``, the + Hoeks and Levelt (1993) kernel will be used. :footcite:p:`Hoeks1993`. + %(n_jobs)s + acc : float + The requested accuracy. Lower accuracy generally means smoother + fits. + method : str + Can be ``"minimize"`` to use SLSQP or ``"inverse"`` to use + Tikhonov-regularized pseudoinverse. Default is ``"minimize"``. + reg : float + Regularization factor for pseudoinverse calculation. Only used if method is + ``"inverse"``. Default is 100. + + Returns + ------- + fit : array + Array of fits, of shape (``n_epochs``, ``n_channels``, ``n_fit_times``). + times : array + The array of times at which points were fit. + + Notes + ----- + This method is adapted from: + + Wierda et al., 2012, "Pupil dilation deconvolution reveals the + dynamics of attention at high temporal resolution." :footcite:p:`Wierda2012` + + Our implementation does not, by default, force all weights to be + greater than zero. It also does not do first-order detrending, + which the Wierda paper discusses implementing. + + References + ---------- + .. 
footbibliography:: + """ + from scipy import linalg + + # Code ported from https://github.com/pyeparse/pyeparse + _validate_type(spacing, (float, np.ndarray, tuple, list), "spacing") + _validate_type(bounds, (type(None), tuple, list, np.ndarray), "bounds") + _validate_type(max_iter, int, "max_iter") + _validate_type(kernel, (np.ndarray, type(None)), "kernel") + _validate_type(n_jobs, int, "n_jobs") + _validate_type(acc, float, "acc") + _validate_type(method, str, "method") + _check_option("method", method, ["minimize", "inverse"]) + _validate_type(reg, (int, float), "reg") + + if bounds is not None: + bounds = np.array(bounds) + if bounds.ndim != 1 or bounds.size != 2: + raise RuntimeError("bounds must be 2-element array or None") + if kernel is None: + kernel = pupil_kernel(epochs.info["sfreq"]) + else: + kernel = np.array(kernel, np.float64) + if kernel.ndim != 1: + raise TypeError("kernel must be 1D") + + # get the data (and make sure it exists) + pupil_data = pupil_zscores(epochs, baseline=baseline) + + # set up parallel function (and check n_jobs) + parallel, p_fun, n_jobs = parallel_func(_do_deconv, n_jobs) + + # figure out where the samples go + n_samp = len(epochs.times) + if not isinstance(spacing, (np.ndarray, tuple, list)): + times = np.arange(epochs.times[0], epochs.times[-1], spacing) + times = np.unique(times) + else: + times = np.asanyarray(spacing) + samples = epochs.time_as_index(times) + if len(samples) == 0: + warn("No usable samples") + return np.array([]), np.array([]) + + # convert bounds to slsqp representation + if bounds is not None: + bounds = np.array([bounds for _ in range(len(samples))]) + else: + bounds = [] # compatible with old version of scipy + + # Build the convolution matrix + conv_mat = np.zeros((n_samp, len(samples))) + for li, loc in enumerate(samples): + eidx = min(loc + len(kernel), n_samp) + conv_mat[loc:eidx, li] = kernel[: eidx - loc] + + # do the fitting + if method == "inverse": + u, s, v = linalg.svd(conv_mat, full_matrices=False) + # Threshold small singular values + s[s < 1e-7 * s[0]] = 0 + # Regularize non-zero singular values + s[s > 0] /= s[s > 0] ** 2 + reg + inv_conv_mat = np.dot(v.T, s[:, np.newaxis] * u.T) + fit = np.dot(pupil_data, inv_conv_mat.T) + else: # minimize + fit_fails = parallel( + p_fun(data, conv_mat, bounds, max_iter, acc) + for data in np.array_split(pupil_data, n_jobs) + ) + fit = np.concatenate([f[0] for f in fit_fails]) + fails = np.concatenate([f[1] for f in fit_fails]) + if np.any(fails): + reasons = ", ".join(str(r) for r in np.setdiff1d(np.unique(fails), [0])) + warn( + f"{np.sum(fails != 0)} out of {len(fails)} fits " + f"did not converge (reasons: {reasons})" + ) + return fit, times + + +def _do_deconv(pupil_data, conv_mat, bounds, max_iter, acc): + """Parallelize deconvolution helper function.""" + # Code ported from https://github.com/pyeparse/pyeparse + from scipy.optimize import fmin_slsqp + + x0 = np.zeros(conv_mat.shape[1]) + fit = np.empty((pupil_data.shape[0], pupil_data.shape[1], conv_mat.shape[1])) + failed = np.empty(fit.shape) + for ei, data in enumerate(pupil_data): + out = fmin_slsqp( + _score, + x0, + args=(data, conv_mat), + epsilon=1e-4, + bounds=bounds, + disp=False, + full_output=True, + iter=max_iter, + acc=acc, + ) + fit[ei, :, :] = out[0] + failed[ei, :, :] = out[3] + return fit, failed + + +def _score(vals, x_0, conv_mat): + return np.mean((x_0 - conv_mat.dot(vals)) ** 2) + + +def pupil_kernel(sfreq, dur=4.0, t_max=0.930, n=10.1, s=1.0): + """Generate pupil response kernel modeled as an 
Erlang gamma function. + + Parameters + ---------- + sfreq : int + Sampling frequency (samples/second) to use in generating the kernel. + dur : float + Length (in seconds) of the generated kernel. Default is ``4.0`` seconds. + t_max : float + Time (in seconds) where the response maximum is stipulated to occur. Default is + ``0.930`` seconds, as in Hoeks and Levelt (1993). :footcite:p:`Hoeks1993`. + n : float + Number of negative-exponential layers in the cascade defining the + gamma function. Default is ``10.1``, as in Hoeks and Levelt (1993). + :footcite:p:`Hoeks1993`. + s : float | None + Desired value for the area under the kernel. If ``None``, no scaling is + performed. Default is ``1.0``. + + Returns + ------- + h : array + The generated kernel. + + References + ---------- + .. footbibliography:: + """ + # Code ported from https://github.com/pyeparse/pyeparse + n_samp = int(np.round(sfreq * dur)) + t = np.arange(n_samp, dtype=float) / sfreq + h = (t**n) * np.exp(-n * t / t_max) + scal = 1.0 if s is None else float(s) / (np.sum(h) * (t[1] - t[0])) + h = scal * h + return h diff --git a/mne/preprocessing/eyetracking/tests/test_pupillometry.py b/mne/preprocessing/eyetracking/tests/test_pupillometry.py index bda1cf09f75..6a39ccaea16 100644 --- a/mne/preprocessing/eyetracking/tests/test_pupillometry.py +++ b/mne/preprocessing/eyetracking/tests/test_pupillometry.py @@ -5,13 +5,13 @@ import numpy as np import pytest -from mne import create_info +import mne from mne.datasets.testing import data_path, requires_testing_data from mne.io import RawArray, read_raw_eyelink -from mne.preprocessing.eyetracking import interpolate_blinks +from mne.preprocessing.eyetracking import deconvolve, interpolate_blinks fname = data_path(download=False) / "eyetrack" / "test_eyelink.asc" -pytest.importorskip("pandas") +# pytest.importorskip("pandas") @requires_testing_data @@ -29,7 +29,7 @@ def test_interpolate_blinks(buffer, match, cause_error, interpolate_gaze): raw = read_raw_eyelink(fname, create_annotations=["blinks"], find_overlaps=True) # Create a dummy stim channel # this will hit a certain line in the interpolate_blinks function - info = create_info(["STI"], raw.info["sfreq"], ["stim"]) + info = mne.create_info(["STI"], raw.info["sfreq"], ["stim"]) stim_data = np.zeros((1, len(raw.times))) stim_raw = RawArray(stim_data, info) raw.add_channels([stim_raw], force_update_info=True) @@ -64,3 +64,54 @@ def test_interpolate_blinks(buffer, match, cause_error, interpolate_gaze): if interpolate_gaze: assert not np.isnan(data[0, blink_ind]).any() # left eye assert not np.isnan(data[1, blink_ind]).any() # right eye + + +@pytest.mark.parametrize("method", ["minimize", "inverse"]) +def test_deconvolve(eyetrack_raw, method): + """Test deconvolving pupil data.""" + # simulate the convolved pupil data + signal = (np.zeros(30), np.ones(10), np.zeros(20), 2 * np.ones(10), np.zeros(30)) + signal = np.concatenate(signal, axis=0) + kernel = np.exp(-(np.linspace(-2, 2, 20) ** 2)) + kernel = kernel / sum(kernel) + ps = np.convolve(signal, kernel, mode="same") + + # replace the pupil data with the simulated data + raw = eyetrack_raw.copy() + raw.rename_channels({"pupil": "pupil_left"}) + raw._data[-1] = ps + epochs = mne.make_fixed_length_epochs(raw, preload=True) + + data = epochs.get_data(picks=["pupil_left"]) + fit, times = deconvolve(epochs, method=method) + # Check that we didn't change the epochs data in place + np.testing.assert_array_equal(data, epochs.get_data(picks=["pupil_left"])) + # Check that the fit has the 
expected shape + n_chs = data.shape[1] + np.testing.assert_equal(fit.shape, (len(epochs), n_chs, len(times))) + # Check the above with custom spacing and bounds + fit, times = deconvolve(epochs, spacing=[0, 0.3, 0.9], bounds=(0, np.inf)) + np.testing.assert_equal(fit.shape, (len(epochs), n_chs, len(times))) + np.testing.assert_equal(len(times), 3) + + +def test_pupil_zscore(eyetrack_raw): + """Test z-scoring pupil data.""" + signal = (np.zeros(30), np.ones(10), np.zeros(20), 2 * np.ones(10), np.zeros(30)) + signal = np.concatenate(signal, axis=0) + raw = eyetrack_raw.copy() + raw.rename_channels({"pupil": "pupil_left"}) + raw._data[-1] = signal + epochs = mne.make_fixed_length_epochs(raw, preload=True) + ps = mne.preprocessing.eyetracking.pupil_zscores(epochs, (None, None)) + np.testing.assert_almost_equal(ps.mean(), 0) + np.testing.assert_almost_equal(ps.std(), 1) + + +def test_pupil_kernel(): + """Test the pupil kernel function.""" + kernel = mne.preprocessing.eyetracking.pupil_kernel(2, 2, 2, 2) + np.testing.assert_equal(kernel.shape, (4,)) + kernel = mne.preprocessing.eyetracking.pupil_kernel(100, 1) + t_kernel = np.linspace(0, 1, 100) + np.testing.assert_almost_equal(t_kernel[np.argmax(kernel)], 0.93, decimal=2) diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 02bb6825b6e..11b44bc50ed 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -3172,6 +3172,16 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): the data. If ``None``, use ``projs`` from `~mne.Report` creation. """ +docdict["pupil_baseline"] = """ +baseline : tuple of length 2 + 2-element tuple of time points to use for baseline calculation. + If a tuple ``(a, b)``, the interval is between ``a`` and ``b`` (in seconds), + is used, end-inclusive. ``None`` can be used for either ``a`` or ``b`` + to specify the beginning or end of the data. If ``(None, None)``, use all time + points for baseline calculation. The default is ``(None, 0)``, which uses all + negative time points (before stimulus onset) for baseline calculation. +""" + # %% # R From 7a09a382a644c2281abc88866e56a6690f230e6a Mon Sep 17 00:00:00 2001 From: Scott Huberty <52462026+scott-huberty@users.noreply.github.com> Date: Tue, 19 Mar 2024 10:00:17 -0700 Subject: [PATCH 3/8] Apply suggestions from code review [ci skip] From Mathieu and Eric Co-authored-by: Mathieu Scheltienne Co-authored-by: Eric Larson --- .../eyetracking/_pupillometry.py | 23 +++++++++---------- .../eyetracking/tests/test_pupillometry.py | 3 ++- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/mne/preprocessing/eyetracking/_pupillometry.py b/mne/preprocessing/eyetracking/_pupillometry.py index 49b681a6bfa..5828faf1a2e 100644 --- a/mne/preprocessing/eyetracking/_pupillometry.py +++ b/mne/preprocessing/eyetracking/_pupillometry.py @@ -146,7 +146,7 @@ def pupil_zscores(epochs, baseline=(None, 0)): Returns ------- pupil_data : array - An array of pupil size data, shape (``n_epochs``, ``n_channels``, ``n_times``). + An array of pupil size data, shape (n_epochs, n_channels, n_times). """ # Code ported from https://github.com/pyeparse/pyeparse _check_preload(epochs, "Z-score normalization") @@ -204,12 +204,12 @@ def deconvolve( Maximum number of iterations of minimization algorithm. Default is ``500``. kernel : array | None Kernel to assume when doing deconvolution. If ``None``, the - Hoeks and Levelt (1993) kernel will be used. :footcite:p:`Hoeks1993`. + Hoeks and Levelt (1993)\ :footcite:p:`Hoeks1993` kernel will be used. 
%(n_jobs)s acc : float The requested accuracy. Lower accuracy generally means smoother fits. - method : str + method : ``"minimize"`` | ``"inverse"`` Can be ``"minimize"`` to use SLSQP or ``"inverse"`` to use Tikhonov-regularized pseudoinverse. Default is ``"minimize"``. reg : float @@ -218,17 +218,16 @@ def deconvolve( Returns ------- - fit : array - Array of fits, of shape (``n_epochs``, ``n_channels``, ``n_fit_times``). - times : array + fit : array, shape (n_epochs, n_channels, n_fit_times) + Array of fits. + times : array, shape (n_fit_times,) The array of times at which points were fit. Notes ----- - This method is adapted from: - - Wierda et al., 2012, "Pupil dilation deconvolution reveals the - dynamics of attention at high temporal resolution." :footcite:p:`Wierda2012` + This method is adapted from Wierda et al., 2012, "Pupil dilation + deconvolution reveals the dynamics of attention at high temporal + resolution."\ :footcite:p:`Wierda2012` Our implementation does not, by default, force all weights to be greater than zero. It also does not do first-order detrending, @@ -357,10 +356,10 @@ def pupil_kernel(sfreq, dur=4.0, t_max=0.930, n=10.1, s=1.0): Length (in seconds) of the generated kernel. Default is ``4.0`` seconds. t_max : float Time (in seconds) where the response maximum is stipulated to occur. Default is - ``0.930`` seconds, as in Hoeks and Levelt (1993). :footcite:p:`Hoeks1993`. + ``0.930`` seconds, as in Hoeks and Levelt (1993)\ :footcite:p:`Hoeks1993`. n : float Number of negative-exponential layers in the cascade defining the - gamma function. Default is ``10.1``, as in Hoeks and Levelt (1993). + gamma function. Default is ``10.1``, as in Hoeks and Levelt (1993)\ :footcite:p:`Hoeks1993`. s : float | None Desired value for the area under the kernel. If ``None``, no scaling is diff --git a/mne/preprocessing/eyetracking/tests/test_pupillometry.py b/mne/preprocessing/eyetracking/tests/test_pupillometry.py index 6a39ccaea16..c98d1de9037 100644 --- a/mne/preprocessing/eyetracking/tests/test_pupillometry.py +++ b/mne/preprocessing/eyetracking/tests/test_pupillometry.py @@ -11,7 +11,6 @@ from mne.preprocessing.eyetracking import deconvolve, interpolate_blinks fname = data_path(download=False) / "eyetrack" / "test_eyelink.asc" -# pytest.importorskip("pandas") @requires_testing_data @@ -26,6 +25,8 @@ ) def test_interpolate_blinks(buffer, match, cause_error, interpolate_gaze): """Test interpolating pupil data during blinks.""" + pytest.importorskip("pandas") + raw = read_raw_eyelink(fname, create_annotations=["blinks"], find_overlaps=True) # Create a dummy stim channel # this will hit a certain line in the interpolate_blinks function From 693fa0825e20062763eece36decf447f8cfbb0a2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 19 Mar 2024 17:00:42 +0000 Subject: [PATCH 4/8] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- mne/preprocessing/eyetracking/_pupillometry.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mne/preprocessing/eyetracking/_pupillometry.py b/mne/preprocessing/eyetracking/_pupillometry.py index 5828faf1a2e..787d888ebf8 100644 --- a/mne/preprocessing/eyetracking/_pupillometry.py +++ b/mne/preprocessing/eyetracking/_pupillometry.py @@ -185,7 +185,7 @@ def deconvolve( method="minimize", reg=100, ): - """Deconvolve pupillary responses. + r"""Deconvolve pupillary responses. 
Parameters ---------- @@ -225,8 +225,8 @@ def deconvolve( Notes ----- - This method is adapted from Wierda et al., 2012, "Pupil dilation - deconvolution reveals the dynamics of attention at high temporal + This method is adapted from Wierda et al., 2012, "Pupil dilation + deconvolution reveals the dynamics of attention at high temporal resolution."\ :footcite:p:`Wierda2012` Our implementation does not, by default, force all weights to be @@ -346,7 +346,7 @@ def _score(vals, x_0, conv_mat): def pupil_kernel(sfreq, dur=4.0, t_max=0.930, n=10.1, s=1.0): - """Generate pupil response kernel modeled as an Erlang gamma function. + r"""Generate pupil response kernel modeled as an Erlang gamma function. Parameters ---------- From 029afcaf42860fe81273578167f2ed1b2bb7742f Mon Sep 17 00:00:00 2001 From: Scott Huberty Date: Tue, 19 Mar 2024 10:11:07 -0700 Subject: [PATCH 5/8] FIX: nest import of BaseEpochs Was causing a circular import error in https://github.com/mne-tools/mne-python/actions/runs/8347127725/job/22846082215?pr=12505 --- mne/preprocessing/eyetracking/_pupillometry.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mne/preprocessing/eyetracking/_pupillometry.py b/mne/preprocessing/eyetracking/_pupillometry.py index 787d888ebf8..09462edc742 100644 --- a/mne/preprocessing/eyetracking/_pupillometry.py +++ b/mne/preprocessing/eyetracking/_pupillometry.py @@ -5,7 +5,6 @@ import numpy as np -from mne import BaseEpochs from mne._fiff.pick import _picks_to_idx from mne.parallel import parallel_func @@ -148,6 +147,8 @@ def pupil_zscores(epochs, baseline=(None, 0)): pupil_data : array An array of pupil size data, shape (n_epochs, n_channels, n_times). """ + from mne import BaseEpochs + # Code ported from https://github.com/pyeparse/pyeparse _check_preload(epochs, "Z-score normalization") _validate_type(epochs, BaseEpochs, "epochs") From 0c54803c9ce6de58ce87b08f108234d3612b6515 Mon Sep 17 00:00:00 2001 From: Scott Huberty Date: Tue, 19 Mar 2024 10:12:47 -0700 Subject: [PATCH 6/8] DOC: update changelog --- doc/changes/devel/12505.newfeature.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 doc/changes/devel/12505.newfeature.rst diff --git a/doc/changes/devel/12505.newfeature.rst b/doc/changes/devel/12505.newfeature.rst new file mode 100644 index 00000000000..141f7c8f9c1 --- /dev/null +++ b/doc/changes/devel/12505.newfeature.rst @@ -0,0 +1 @@ +Added functions for deconvolving eyetracking pupil size data: :func:`mne.preprocessing.eyetracking.deconvolve` :func:`mne.preprocessing.eyetracking.pupil_zscores`, and :func:`mne.preprocessing.eyetracking.pupil_kernel`. by `Scott Huberty`_. 
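Before the final two patches, a brief aside on the math behind the ``method="inverse"`` branch that patch 2 added to ``deconvolve``: the SVD manipulation there (zeroing tiny singular values, then scaling each remaining one by ``s / (s**2 + reg)``) is the standard Tikhonov-regularized pseudoinverse, which is why increasing ``reg`` damps the fitted weights. The following standalone sketch is not part of any patch; the random ``conv_mat``, its shape, and the ``reg`` value are placeholders standing in for the kernel convolution matrix built inside ``deconvolve``.

# Standalone sketch, not from the patch series: the random conv_mat and reg
# below are stand-ins for the kernel convolution matrix and the `reg`
# parameter of deconvolve(..., method="inverse").
import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
n_samp, n_fit = 200, 20
conv_mat = rng.normal(size=(n_samp, n_fit))  # stand-in convolution matrix
reg = 100.0

# SVD route, as in _pupillometry.py: zero out tiny singular values, then
# shrink each remaining one from the usual 1/s to s / (s**2 + reg).
u, s, v = linalg.svd(conv_mat, full_matrices=False)
s[s < 1e-7 * s[0]] = 0
s[s > 0] /= s[s > 0] ** 2 + reg
inv_svd = v.T @ (s[:, np.newaxis] * u.T)

# Closed form of the same operator: (A.T @ A + reg * I)^-1 @ A.T
inv_closed = linalg.solve(conv_mat.T @ conv_mat + reg * np.eye(n_fit), conv_mat.T)

print(np.allclose(inv_svd, inv_closed))  # True; no singular value hits the cutoff here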
From 6b2ea4682b7e2e794c3cbfeecddb4b28874d3d39 Mon Sep 17 00:00:00 2001
From: Scott Huberty
Date: Tue, 19 Mar 2024 10:17:13 -0700
Subject: [PATCH 7/8] Apply remaining suggestions from Mathieu

Co-authored-by: Mathieu Scheltienne
---
 doc/references.bib                             | 1 +
 mne/preprocessing/eyetracking/_pupillometry.py | 4 +---
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/doc/references.bib b/doc/references.bib
index 43438ddc0dc..21b216f45f4 100644
--- a/doc/references.bib
+++ b/doc/references.bib
@@ -2167,6 +2167,7 @@ @inproceedings{StrohmeierEtAl2015
 @article{Wierda2012,
   title = "Pupil dilation deconvolution reveals the dynamics of attention
            at high temporal resolution",
+  doi = {10.1073/pnas.1201858109},
   author = "Wierda, Stefan M and van Rijn, Hedderik and Taatgen, Niels A and
             Martens, Sander",
   journal = "Proceedings of the National Academy of Sciences",
diff --git a/mne/preprocessing/eyetracking/_pupillometry.py b/mne/preprocessing/eyetracking/_pupillometry.py
index 09462edc742..0d6ec6fb41a 100644
--- a/mne/preprocessing/eyetracking/_pupillometry.py
+++ b/mne/preprocessing/eyetracking/_pupillometry.py
@@ -154,9 +154,7 @@ def pupil_zscores(epochs, baseline=(None, 0)):
     _validate_type(epochs, BaseEpochs, "epochs")
     _validate_type(baseline, (tuple, list, np.ndarray), "baseline")

-    pupil_picks = _picks_to_idx(epochs.info, "pupil", allow_empty=True)
-    if not pupil_picks.any():
-        raise RuntimeError("no pupil data")
+    pupil_picks = _picks_to_idx(epochs.info, "pupil", allow_empty=False)
     if len(baseline) != 2:
         raise RuntimeError("baseline must be a 2-element list")
     baseline = np.array(baseline)

From 8a8cecbb537c5582817018e99497c5fe21535325 Mon Sep 17 00:00:00 2001
From: Scott Huberty
Date: Tue, 19 Mar 2024 19:16:08 -0700
Subject: [PATCH 8/8] FIX, DOC: add n_fit_times to xref_ignore

---
 doc/conf.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/conf.py b/doc/conf.py
index a00a34debc3..baf25f96b44 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -340,6 +340,7 @@
     "n_signals",
     "n_step",
     "n_freqs",
+    "n_fit_times",
     "wsize",
     "Tx",
     "M",
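For orientation, here is a minimal end-to-end sketch of the API this series adds (pupil_kernel, pupil_zscores, deconvolve). It is not part of any patch above: the synthetic pupil channel, sampling rate, event times, and noise level are invented for illustration, and it assumes the full 8-patch series is applied and that mne.create_info accepts "pupil" as a channel type, as the new tests appear to rely on.

# Illustrative usage sketch only -- assumes the branch with all 8 patches
# applied, and that "pupil" is a valid channel type for mne.create_info.
import numpy as np

import mne
from mne.preprocessing.eyetracking import deconvolve, pupil_kernel, pupil_zscores

sfreq = 100.0
rng = np.random.default_rng(0)

# Simulate a pupil trace: sparse "attentional pulses" convolved with the
# Hoeks & Levelt kernel, plus a little noise.
events = np.zeros(int(sfreq * 20))
events[[200, 500, 1200]] = 1.0
kernel = pupil_kernel(sfreq)
trace = np.convolve(events, kernel)[: events.size]
trace += rng.normal(0.0, 0.01, events.size)

info = mne.create_info(["pupil_left"], sfreq, ["pupil"])
raw = mne.io.RawArray(trace[np.newaxis, :], info)
epochs = mne.make_fixed_length_epochs(raw, duration=2.0, preload=True)

# Baseline-corrected, z-scored pupil data: shape (n_epochs, n_channels, n_times)
zs = pupil_zscores(epochs, baseline=(None, 0))

# Deconvolve: returns the fitted weights and the time points they were fit at
fit, times = deconvolve(epochs, spacing=0.1, method="minimize")
print(fit.shape, times.shape)

The fit array comes back with shape (n_epochs, n_channels, n_fit_times), matching the docstring wording adopted in patch 3 and the n_fit_times cross-reference added to doc/conf.py in patch 8.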