diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 25d7db683..be47f5a7d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -79,7 +79,7 @@ repos: hooks: - id: codespell args: - - --ignore-words-list=additionals,alle,alot,bund,currenty,datas,farenheit,falsy,fo,haa,hass,iif,incomfort,ines,ist,nam,nd,pres,pullrequests,resset,rime,ser,serie,te,technik,ue,unsecure,withing,zar + - --ignore-words-list=additionals,alle,aline,alot,bund,currenty,datas,farenheit,falsy,fo,haa,hass,iif,incomfort,ines,ist,nam,nd,perfomances,pres,pullrequests,resset,rime,ser,serie,te,technik,ue,unsecure,withing,zar + - --skip="./.*,*.csv,*.json,*.ambr" + - --quiet-level=2 exclude_types: [ csv, json, svg ] diff --git a/docs/source/datasets.rst b/docs/source/datasets.rst index 48ba40380..14aa8e528 100644 --- a/docs/source/datasets.rst +++ b/docs/source/datasets.rst @@ -21,6 +21,9 @@ Motor Imagery Datasets BNCI2015_001 BNCI2015_004 Cho2017 + Dreyer2023A + Dreyer2023B + Dreyer2023C Lee2019_MI GrosseWentrup2009 Ofner2017 diff --git a/docs/source/whats_new.rst b/docs/source/whats_new.rst index 652dd3672..a6a464467 100644 --- a/docs/source/whats_new.rst +++ b/docs/source/whats_new.rst @@ -20,6 +20,7 @@ Enhancements - Adding cache option to the evaluation (:gh:`517` by `Bruno Aristimunha`_) - Option to interpolate channel in paradigms' `match_all` method (:gh:`480` by `Gregoire Cattan`_) +- Adding new motor imagery dataset, Dreyer2023 (PR :gh:`404` by `Sara Sedlar`_ and `Sylvain Chevallier`_) - Adding leave k-Subjects out evaluations (:gh:`470` by `Bruno Aristimunha`_) - Update Braindecode dependency to 0.8 (:gh:`542` by `Pierre Guetschel`_) - Improve transform function of AugmentedDataset (:gh:`541` by `Quentin Barthelemy`_) @@ -31,6 +32,7 @@ Bugs - Fix TRCA implementation for different stimulation freqs and for signal filtering (:gh:522 by `Sylvain Chevallier`_) - Fix saving to BIDS runs with a description string in their name (:gh:`530` by `Pierre Guetschel`_) +- Fix 
issue with WithinSessionEvaluation with multiple datasets (PR :gh:`521`, issue :gh:`514` by `Sara Sedlar`_) - Fix import of keras BatchNormalization for TF 2.13 and higher (:gh:`544` by `Brian Irvine`_) - Fix the doc summary tables of :class:`moabb.datasets.Lee2019_SSVEP` (:gh:`548` :gh:`547` :gh:`546` by `Pierre Guetschel`_) - Fix the doc summary for Castillos2023 dataset (:gh:`561` by `Bruno Aristimunha`_) diff --git a/examples/Dreyer_clf_scores_vs_subj_info.py b/examples/Dreyer_clf_scores_vs_subj_info.py new file mode 100644 index 000000000..aae9c2cfe --- /dev/null +++ b/examples/Dreyer_clf_scores_vs_subj_info.py @@ -0,0 +1,159 @@ +""" +=============================================== +Examples of analysis of a Dreyer2023 A dataset. +=============================================== + +This example shows how to plot Dreyer2023A Left-Right Imagery ROC AUC scores +obtained with CSP+LDA pipeline versus demographic information of the examined +subjects (gender and age) and experimenters (gender). + +To reduce computational time, the example is provided for four subjects. + +""" + +# Authors: Sara Sedlar +# Sylvain Chevallier +# License: BSD (3-clause) + +import matplotlib.patches as mpatches +import matplotlib.pyplot as plt +import pandas as pd +import seaborn as sb +from pyriemann.estimation import Covariances +from pyriemann.spatialfilters import CSP +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA +from sklearn.pipeline import make_pipeline + +from moabb.datasets import Dreyer2023A +from moabb.evaluations import WithinSessionEvaluation +from moabb.paradigms import MotorImagery + + +# 1. Defining dataset, selecting subject for analysis and getting data +######################################################################################## +dreyer2023 = Dreyer2023A() +dreyer2023.subject_list = [1, 5, 7, 35] +dreyer2023.get_data() +######################################################################################## +# 2. 
Defining MotorImagery paradigm and CSP+LDA pipeline +paradigm = MotorImagery() +pipelines = {} +pipelines["CSP+LDA"] = make_pipeline( + Covariances(estimator="oas"), CSP(nfilter=6), LDA(solver="lsqr", shrinkage="auto") +) +######################################################################################## +# 3. Within session evaluation of the pipeline +evaluation = WithinSessionEvaluation( + paradigm=paradigm, datasets=[dreyer2023], suffix="examples", overwrite=True +) +results = evaluation.process(pipelines) +results = results.loc[results["subject"].isin([str(s) for s in dreyer2023.subject_list])] +######################################################################################## +# 4. Loading dataset info and concatenation with the obtained results +info = dreyer2023.get_subject_info().rename(columns={"score": "score_MR"}) +results_info = pd.concat([info, results], axis=1) +results_info["Age"] = 2019 - results_info["Birth_year"] +######################################################################################## +# 5.1 Plotting subject AUC ROC scores vs subject's gender +fig, ax = plt.subplots(nrows=2, ncols=2, facecolor="white", figsize=[16, 8], sharey=True) +fig.subplots_adjust(wspace=0.0, hspace=0.5) +sb.boxplot( + data=results_info, y="score", x="SUJ_gender", ax=ax[0, 0], palette="Set1", width=0.3 +) +sb.stripplot( + data=results_info, + y="score", + x="SUJ_gender", + ax=ax[0, 0], + palette="Set1", + linewidth=1, + edgecolor="k", + size=3, + alpha=0.3, + zorder=1, +) +ax[0, 0].set_title("AUC ROC scores vs. 
subject gender") +ax[0, 0].set_xticklabels(["Man", "Woman"]) +ax[0, 0].set_ylabel("ROC AUC") +ax[0, 0].set_xlabel(None) +ax[0, 0].set_ylim(0.3, 1) +######################################################################################## +# 5.2 Plotting subject AUC ROC scores vs subjects's age per gender +sb.regplot( + data=results_info[results_info["SUJ_gender"] == 1][["score", "Age"]].astype( + "float32" + ), + y="score", + x="Age", + ax=ax[0, 1], + scatter_kws={"color": "#e41a1c", "alpha": 0.5}, + line_kws={"color": "#e41a1c"}, +) +sb.regplot( + data=results_info[results_info["SUJ_gender"] == 2][["score", "Age"]].astype( + "float32" + ), + y="score", + x="Age", + ax=ax[0, 1], + scatter_kws={"color": "#377eb8", "alpha": 0.5}, + line_kws={"color": "#377eb8"}, +) +ax[0, 1].set_title("AUC ROC scores vs. subject age per gender") +ax[0, 1].set_ylabel(None) +ax[0, 1].set_xlabel(None) +ax[0, 1].legend( + handles=[ + mpatches.Patch(color="#e41a1c", label="Man"), + mpatches.Patch(color="#377eb8", label="Woman"), + ] +) +######################################################################################## +# 5.3 Plotting subject AUC ROC scores vs experimenter's gender +sb.boxplot( + data=results_info, y="score", x="EXP_gender", ax=ax[1, 0], palette="Set1", width=0.3 +) +sb.stripplot( + data=results_info, + y="score", + x="EXP_gender", + ax=ax[1, 0], + palette="Set1", + linewidth=1, + edgecolor="k", + size=3, + alpha=0.3, + zorder=1, +) +ax[1, 0].set_title("AUC ROC scores vs. 
experimenter gender") +ax[1, 0].set_xticklabels(["Man", "Woman"]) +ax[1, 0].set_ylabel("ROC AUC") +ax[1, 0].set_xlabel(None) +ax[1, 0].set_ylim(0.3, 1) +######################################################################################## +# 5.4 Plotting subject AUC ROC scores vs subject's age +sb.regplot( + data=results_info[["score", "Age"]].astype("float32"), + y="score", + x="Age", + ax=ax[1, 1], + scatter_kws={"color": "black", "alpha": 0.5}, + line_kws={"color": "black"}, +) +ax[1, 1].set_title("AUC ROC scores vs. subject age") +ax[1, 1].set_ylabel(None) +plt.show() +######################################################################################## +# 5.5 Obtained results for four selected subjects correspond to the following figure. +# +# .. image:: images/Dreyer_clf_scores_vs_subj_info/4_selected_subjects.png +# :align: center +# :alt: 4_selected_subjects +# +# Obtained results for all subjects correspond to the following figure. +# +# .. image:: images/Dreyer_clf_scores_vs_subj_info/all_subjects.png +# :align: center +# :alt: all_subjects +# +######################################################################################## diff --git a/examples/images/Dreyer_clf_scores_vs_subj_info/4_selected_subjects.png b/examples/images/Dreyer_clf_scores_vs_subj_info/4_selected_subjects.png new file mode 100644 index 000000000..15cb02d4c Binary files /dev/null and b/examples/images/Dreyer_clf_scores_vs_subj_info/4_selected_subjects.png differ diff --git a/examples/images/Dreyer_clf_scores_vs_subj_info/all_subjects.png b/examples/images/Dreyer_clf_scores_vs_subj_info/all_subjects.png new file mode 100644 index 000000000..f52f8bef6 Binary files /dev/null and b/examples/images/Dreyer_clf_scores_vs_subj_info/all_subjects.png differ diff --git a/moabb/datasets/Dreyer2023.py b/moabb/datasets/Dreyer2023.py new file mode 100644 index 000000000..02713d035 --- /dev/null +++ b/moabb/datasets/Dreyer2023.py @@ -0,0 +1,524 @@ +""" +A large EEG right-left hand motor 
imagery dataset. +It is organized into three A, B, C datasets. +URL PATH: https://zenodo.org/record/7554429 +""" + +import os +import zipfile +from functools import partialmethod +from os.path import exists, join + +import pandas as pd +from mne.io import read_raw_gdf +from pooch import retrieve + +from .base import BaseDataset +from .download import get_dataset_path + + +# fmt: off +RECORD_INFO = { + "Demo_Bio": ["SUJ_gender", "Birth_year", "Vision", "Vision_assistance", + "Symptoms_TXT", "Level of study", "Level_knowledge neuro", + "Meditation practice", "Laterality answered", "Manual activity", + "Manual activity TXT"], + "OpenVibe_Perf": ["Perf_RUN_3", "Perf_RUN_4", "Perf_RUN_5", "Perf_RUN_6"], + "Mental_Rotation": ["score", "time_1", "time_2"], + "PRE_session": ["PRE_Mood", "PRE_Mindfulness", "PRE_Motivation", + "PRE_Hours_sleep_last_night", "PRE_Usual_sleep", + "PRE_Level_of_alertness", "PRE_Stimulant_doses_12h", + "PRE_Stimulant_doses_2h", "PRE_Stim_normal", "PRE_Tabacco", + "PRE_Tabacco_normal", "PRE_Alcohol", "PRE_Last_meal", + "PRE_Last_pills", "PRE_Pills_TXT", "PRE_Nervousness", + "PRE_Awakening", "PRE_Concentration"], + "POST_session": ["POST_Mood", "POST_Mindfulness", "POST_Motivation", + "POST_Cognitive load", "POST_Agentivity", + "POST_Expectations_filled"], + "Index_of_Learnig_Style": ["active", "reflexive", "sensory", "intuitive", "visual", + "verbal", "sequential", "global"], + "16Pf5": ["A", "B", "C_", "E", "F", "G", "H", "I", "L", "M", "N", "O", "Q1", "Q2", + "Q3", "Q4", "IM", "EX", "AX", "TM", "IN", "SC", "Interrogation"], + "Experimenter_Gender": ['EXP_gender'] +} +# fmt: on + +DREYER2023_URL = "https://zenodo.org/record/7554429/files/BCI Database.zip" + + +def dreyer2023_subject_path(basepath, db_id, subject): + """Returns subject path. 
If it does not exist, it downloads data first.""" + """ + Arguments: + basepath [str]: path to the datasets + db_id [str]: database ID (options A, B, C) + subject [int]: subject number + Returns: + str: path to the subject's data + """ + subj_path = join(basepath, "BCI Database", "Signals", "DATA {0}", "{0}{1}").format( + db_id, subject + ) + if not exists(subj_path): + if not exists(join(basepath, "data.zip")): + retrieve( + DREYER2023_URL, None, fname="data.zip", path=basepath, progressbar=True + ) + with zipfile.ZipFile(os.path.join(basepath, "data.zip"), "r") as f: + f.extractall(basepath) + os.remove(join(basepath, "data.zip")) + return subj_path + + +class Dreyer2023Base(BaseDataset): + """Class for Dreyer2023 dataset management. MI dataset.""" + + """ + Parent class of Dreyer2023A, Dreyer2023B and Dreyer2023C. + Should not be instantiated. + """ + + def __init__(self, subjects, db_id="A"): + assert db_id in [ + "A", + "B", + "C", + ], "Invalid dataset selection! Existing Dreyer2023 datasets: A, B, and C." 
+ self.db_id = db_id + self.db_idx_off = dict(A=0, B=60, C=81) + + super().__init__( + subjects, + sessions_per_subject=1, + events=dict(left_hand=1, right_hand=2), + code="Dreyer2023" + self.db_id, + interval=[3, 8], + paradigm="imagery", + doi="10.5281/zenodo.7554429", + ) + + def get_subject_info(self, path=None, subjects=None, infos=None): + """Loads subject info.""" + """ + Arguments: + path: path to the dataset + subjects: list of subjects + infos: list of recording infos to load + Returns: + DataFrame: selected recording info for given subjects + """ + if isinstance(subjects, type(None)): + subjects = self.subject_list + if len([s for s in subjects if s not in self.subject_list]): + raise ValueError("Invalid subject selection") + + if isinstance(infos, type(None)): + infos = list(RECORD_INFO.keys()) + + path = get_dataset_path("DREYER", path) + basepath = join(path, "MNE-dreyer-2023") + + perform_path = join(basepath, "BCI Database", "Perfomances.xlsx") + + df = pd.read_excel(perform_path) + + if self.db_id == "A": + df.columns = df.iloc[1] + df = df.iloc[list(range(2, 62)), :] + if self.db_id == "B": + df.columns = df.iloc[65] + df = df.iloc[list(range(66, 87)), :] + if self.db_id == "C": + df.columns = df.iloc[90] + df = df.iloc[list(range(91, 97)), :] + df.reset_index(drop=True, inplace=True) + df.columns.name = None + + subjects = [ + ( + self.db_id + str(s + self.db_idx_off[self.db_id]) + if not str(s).startswith(self.db_id) + else str(s) + ) + for s in subjects + ] + + assert not any( + [s for s in subjects if s not in df["SUJ_ID"].tolist()] + ), "Invalid subject selection." 
+ df = df.loc[df["SUJ_ID"].isin(subjects)] + + info_select = ["SUJ_ID"] + for i in infos: + if i in RECORD_INFO.keys(): + for j in RECORD_INFO[i]: + if j in df.columns: + info_select.append(j) + elif i in df.columns: + info_select.append(i) + else: + raise ValueError("Invalid info selection.") + + return df[info_select].reset_index(drop=True) + + def _get_single_subject_data(self, subject): + subj_dir = self.data_path(subject) + + subj_id = self.db_id + str(subject + self.db_idx_off[self.db_id]) + # fmt: off + ch_names = ["Fz", "FCz", "Cz", "CPz", "Pz", "C1", "C3", "C5", "C2", "C4", "C6", + "EOG1", "EOG2", "EOG3", "EMGg", "EMGd", "F4", "FC2", "FC4", "FC6", + "CP2", "CP4", "CP6", "P4", "F3", "FC1", "FC3", "FC5", "CP1", "CP3", + "CP5", "P3"] + # fmt: on + ch_types = ["eeg"] * 11 + ["eog"] * 3 + ["emg"] * 2 + ["eeg"] * 16 + ch_map = dict(zip(ch_names, ch_types)) + + # Closed and open eyes baselines + baselines = {} + baselines["ce"] = read_raw_gdf( + join(subj_dir, subj_id + "_{0}_baseline.gdf").format("CE"), + eog=["EOG1", "EOG2", "EOG3"], + misc=["EMGg", "EMGd"], + verbose="WARNING", + ) + baselines["ce"].set_channel_types(ch_map) + baselines["oe"] = read_raw_gdf( + join(subj_dir, subj_id + "_{0}_baseline.gdf").format("OE"), + eog=["EOG1", "EOG2", "EOG3"], + misc=["EMGg", "EMGd"], + verbose="WARNING", + ) + baselines["oe"].set_channel_types(ch_map) + # Recordings + recordings = {} + # i - index, n - name, t - type + for r_i, (r_n, r_t) in enumerate( + zip( + ["R1", "R2", "R3", "R4", "R5", "R6"], + ["acquisition"] * 2 + ["onlineT"] * 4, + ) + ): + # One subject of dataset A has 4 recordings + if r_i > 3 and self.db_id == "A" and subject == 59: + continue + + recordings["%d" % r_i] = read_raw_gdf( + join(subj_dir, subj_id + "_{0}_{1}.gdf".format(r_n, r_t)), + preload=True, + eog=["EOG1", "EOG2", "EOG3"], + misc=["EMGg", "EMGd"], + verbose="WARNING", + ) + recordings["%d" % r_i].set_channel_types(ch_map) + + recordings["%d" % r_i].annotations.rename( + {"769": 
"left_hand", "770": "right_hand"} + ) + + return {"0": recordings} + + def data_path( + self, subject, path=None, force_update=False, update_path=None, verbose=None + ): + if subject not in self.subject_list: + raise (ValueError("Invalid subject number")) + path = get_dataset_path("DREYER", path) + basepath = join(path, "MNE-dreyer-2023") + if not os.path.isdir(basepath): + os.makedirs(basepath) + return dreyer2023_subject_path( + basepath, self.db_id, subject + self.db_idx_off[self.db_id] + ) + + +class Dreyer2023A(Dreyer2023Base): + """Class for Dreyer2023A dataset management. MI dataset. + + .. admonition:: Dataset summary + + =========== ======= ======= ========== ================= ============ =============== =========== + Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions + =========== ======= ======= ========== ================= ============ =============== =========== + Dreyer2023A 60 27 2 20 5s 512 Hz 6 + =========== ======= ======= ========== ================= ============ =============== =========== + + Dataset description: + + "A large EEG database with users' profile information for motor imagery + Brain-Computer Interface research" [1, 2] + + Data collectors : Appriou Aurélien; Caselli Damien; Benaroch Camille; + Yamamoto Sayu Maria; Roc Aline; Lotte Fabien; + Dreyer Pauline; Pillette Léa + Data manager : Dreyer Pauline + Project leader : Lotte Fabien + Project members : Rimbert Sébastien; Monseigne Thibaut + + Dataset Dreyer2023A contains EEG, EOG and EMG signals recorded on 60 healthy subjects + performing Left-Right Motor Imagery experiments + (29 women, age 19-59, M = 29, SD = 9.32) [1]. + Experiments were conducted by six experimenters. In addition, for each recording + the following pieces of information are provided: + subject's demographic, personality and cognitive profiles, the OpenViBE experimental + instructions and codes, and experimenter's gender. 
+ + The experiment is designed for the investigation of the impact of the participant's + and experimenter's gender on MI BCI performance [1]. + + A recording contains open and closed eyes baseline recordings and 6 runs of the MI + experiments. First 2 runs (acquisition runs) were used to train system and + the following 4 runs (training runs) to train the participant. Each run contained + 40 trials [1]. + + Each trial was recorded as follows [1]: + - t=0.00s cross displayed on screen + - t=2.00s acoustic signal announced appearance of a red arrow + - t=3.00s a red arrow appears (subject starts to perform task) + - t=4.25s the red arrow disappears + - t=4.25s the feedback on performance is given in form of a blue bar + with update frequency of 16 Hz + - t=8.00s cross turns off (subject stops to perform task) + + EEG signals [1]: + - recorded with 27 electrodes, namely: + Fz, FCz, Cz, CPz, Pz, C1, C3, C5, C2, C4, C6, F4, FC2, FC4, FC6, CP2, + CP4, CP6, P4, F3, FC1, FC3, FC5, CP1, CP3, CP5, P3 (10-20 system), + referenced to the left earlobe. + + EOG signals [1]: + - recorded with 3 electrodes, namely: EOG1, EOG2, EOG3 + placed below, above and on the side of one eye. + + EMG signals [1]: + - recorded with 2 electrodes, namely: EMGg, EMGd + placed 2.5cm below the skinfold on each wrist. 
+ + Demographic and biosocial information includes: + - gender, birth year, laterality + - vision, vision assistance + - familiarity to cognitive science or neurology, level of education + - physical activity, meditation + - attentional, neurological, psychiatrics symptoms + + Personality and the cognitive profile [1]: + - evaluated via 5th edition of the 16 Personality Factors (16PF5) test + - and mental rotation test + - index of learning style + + Pre and post experiment questionnaires [1]: + - evaluation of pre and post mood, mindfulness and motivational states + + The online OpenViBE BCI classification performance [1]: + - only performance measure used to give the feedback to the participants + + * Subject 59 contains only 4 runs + + # TO DO: + * Article [1] states there is 29/60 women, in the excel file it is 30/60 + * Sampling frequency? 256 Hz in [1], 512 in loaded info and at URL + + [1] Pillette, L., Roc, A., N’kaoua, B., & Lotte, F. (2021). + Experimenters' influence on mental-imagery based brain-computer interface user training. + International Journal of Human-Computer Studies, 149, 102603. + [2] Benaroch, C., Yamamoto, M. S., Roc, A., Dreyer, P., Jeunet, C., & Lotte, F. (2022). + When should MI-BCI feature optimization include prior knowledge, and which one?. + Brain-Computer Interfaces, 9(2), 115-128. + """ + + __init__ = partialmethod( + Dreyer2023Base.__init__, subjects=list(range(1, 61)), db_id="A" + ) + + +class Dreyer2023B(Dreyer2023Base): + """Class for Dreyer2023B dataset management. MI dataset. + + .. 
admonition:: Dataset summary + + =========== ======= ======= ========== ================= ============ =============== =========== + Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions + =========== ======= ======= ========== ================= ============ =============== =========== + Dreyer2023B 21 27 2 20 5s 512 Hz 6 + =========== ======= ======= ========== ================= ============ =============== =========== + + Dataset description: + + "A large EEG database with users' profile information for motor imagery + Brain-Computer Interface research" [1, 2] + + Data collectors : Appriou Aurélien; Caselli Damien; Benaroch Camille; + Yamamoto Sayu Maria; Roc Aline; Lotte Fabien; + Dreyer Pauline; Pillette Léa + Data manager : Dreyer Pauline + Project leader : Lotte Fabien + Project members : Rimbert Sébastien; Monseigne Thibaut + + Dataset Dreyer2023B contains EEG, EOG and EMG signals recorded on 21 healthy subjects + performing Left-Right Motor Imagery experiments + (8 women, age 19-37, M = 29, SD = 9.318) [2]. + Experiments were conducted by female experimenters. In addition, for each recording + the following pieces of information are provided: + subject's demographic, personality and cognitive profiles, the OpenViBE experimental + instructions and codes, and experimenter's gender. + + The experiment is designed for the investigation of the relation between MI-BCI online + performance and Most Discriminant Frequency Band (MDFB) [2]. + + A recording contains open and closed eyes baseline recordings and 6 runs of the MI + experiments. First 2 runs (acquisition runs) were used to train system and + the following 4 runs (training runs) to train the participant. Each run contained + 40 trials [1]. 
+ + Each trial was recorded as follows [1]: + - t=0.00s cross displayed on screen + - t=2.00s acoustic signal announced appearance of a red arrow + - t=3.00s a red arrow appears (subject starts to perform task) + - t=4.25s the red arrow disappears + - t=4.25s the feedback on performance is given in form of a blue bar + with update frequency of 16 Hz + - t=8.00s cross turns off (subject stops to perform task) + + EEG signals [1]: + - recorded with 27 electrodes, namely: + Fz, FCz, Cz, CPz, Pz, C1, C3, C5, C2, C4, C6, F4, FC2, FC4, FC6, CP2, + CP4, CP6, P4, F3, FC1, FC3, FC5, CP1, CP3, CP5, P3 (10-20 system), + referenced to the left earlobe. + + EOG signals [1]: + - recorded with 3 electrodes, namely: EOG1, EOG2, EOG3 + placed below, above and on the side of one eye. + + EMG signals [1]: + - recorded with 2 electrodes, namely: EMGg, EMGd + placed 2.5cm below the skinfold on each wrist. + + Demographic and biosocial information includes: + - gender, birth year, laterality + - vision, vision assistance + - familiarity to cognitive science or neurology, level of education + - physical activity, meditation + - attentional, neurological, psychiatrics symptoms + + Personality and the cognitive profile [1]: + - evaluated via 5th edition of the 16 Personality Factors (16PF5) test + - and mental rotation test + - index of learning style + + Pre and post experiment questionnaires [1]: + - evaluation of pre and post mood, mindfulness and motivational states + + The online OpenViBE BCI classification performance [1]: + - only performance measure used to give the feedback to the participants + + # TO DO: + * Sampling frequency? 
256 Hz in [1], 512 in loaded info and at URL + Mapping based on MDFB as in [2] + database_B = ['B' + str(i) for i in range(61, 82)] + database_A = ['A' + str(i) for i in [43, 44, 6, 10, 52, 23, 48, 24, 40, 43, 2, 1, + 13, 22, 25, 29, 3, 11, 30, 19, 21]] + cross_database_mapping = dict(zip(database_B, database_A)) + + [1] Pillette, L., Roc, A., N’kaoua, B., & Lotte, F. (2021). + Experimenters' influence on mental-imagery based brain-computer interface user training. + International Journal of Human-Computer Studies, 149, 102603. + [2] Benaroch, C., Yamamoto, M. S., Roc, A., Dreyer, P., Jeunet, C., & Lotte, F. (2022). + When should MI-BCI feature optimization include prior knowledge, and which one?. + Brain-Computer Interfaces, 9(2), 115-128. + """ + + __init__ = partialmethod( + Dreyer2023Base.__init__, subjects=list(range(1, 22)), db_id="B" + ) + + +class Dreyer2023C(Dreyer2023Base): + """Class for Dreyer2023C dataset management. MI dataset. + + .. admonition:: Dataset summary + + =========== ======= ======= ========== ================= ============ =============== =========== + Name #Subj #Chan #Classes #Trials / class Trials len Sampling rate #Sessions + =========== ======= ======= ========== ================= ============ =============== =========== + Dreyer2023C 6 27 2 20 5s 512 Hz 6 + =========== ======= ======= ========== ================= ============ =============== =========== + + Dataset description: + + "A large EEG database with users' profile information for motor imagery + Brain-Computer Interface research" [1, 2] + + Data collectors : Appriou Aurélien; Caselli Damien; Benaroch Camille; + Yamamoto Sayu Maria; Roc Aline; Lotte Fabien; + Dreyer Pauline; Pillette Léa + Data manager : Dreyer Pauline + Project leader : Lotte Fabien + Project members : Rimbert Sébastien; Monseigne Thibaut + + Dataset Dreyer2023C contains EEG, EOG and EMG signals recorded on 6 healthy subjects + performing Left-Right Motor Imagery experiments (4 women) who participated in 
datasets + A or B. + In addition, for each recording the following pieces of information are provided: + subject's demographic, personality and cognitive profiles, the OpenViBE experimental + instructions and codes, and experimenter's gender. + + A recording contains open and closed eyes baseline recordings and 6 runs of the MI + experiments. First 2 runs (acquisition runs) were used to train system and + the following 4 runs (training runs) to train the participant. Each run contained + 40 trials [1]. + + Each trial was recorded as follows [1]: + - t=0.00s cross displayed on screen + - t=2.00s acoustic signal announced appearance of a red arrow + - t=3.00s a red arrow appears (subject starts to perform task) + - t=4.25s the red arrow disappears + - t=4.25s the feedback on performance is given in form of a blue bar + with update frequency of 16 Hz + - t=8.00s cross turns off (subject stops to perform task) + + EEG signals [1]: + - recorded with 27 electrodes, namely: + Fz, FCz, Cz, CPz, Pz, C1, C3, C5, C2, C4, C6, F4, FC2, FC4, FC6, CP2, + CP4, CP6, P4, F3, FC1, FC3, FC5, CP1, CP3, CP5, P3 (10-20 system), + referenced to the left earlobe. + + EOG signals [1]: + - recorded with 3 electrodes, namely: EOG1, EOG2, EOG3 + placed below, above and on the side of one eye. + + EMG signals [1]: + - recorded with 2 electrodes, namely: EMGg, EMGd + placed 2.5cm below the skinfold on each wrist. 
+ + Demographic and biosocial information includes: + - gender, birth year, laterality + - vision, vision assistance + - familiarity to cognitive science or neurology, level of education + - physical activity, meditation + - attentional, neurological, psychiatrics symptoms + + Personality and the cognitive profile [1]: + - evaluated via 5th edition of the 16 Personality Factors (16PF5) test + - and mental rotation test + - index of learning style + + Pre and post experiment questionnaires [1]: + - evaluation of pre and post mood, mindfulness and motivational states + + The online OpenViBE BCI classification performance [1]: + - only performance measure used to give the feedback to the participants + + # TO DO: + * Sampling frequency? 256 Hz in [1], 512 in loaded info and at URL + + [1] Pillette, L., Roc, A., N’kaoua, B., & Lotte, F. (2021). + Experimenters' influence on mental-imagery based brain-computer interface user training. + International Journal of Human-Computer Studies, 149, 102603. + [2] Benaroch, C., Yamamoto, M. S., Roc, A., Dreyer, P., Jeunet, C., & Lotte, F. (2022). + When should MI-BCI feature optimization include prior knowledge, and which one?. + Brain-Computer Interfaces, 9(2), 115-128. + """ + + __init__ = partialmethod( + Dreyer2023Base.__init__, subjects=list(range(1, 7)), db_id="C" + ) diff --git a/moabb/datasets/__init__.py b/moabb/datasets/__init__.py index f293715e3..515294686 100644 --- a/moabb/datasets/__init__.py +++ b/moabb/datasets/__init__.py @@ -54,6 +54,7 @@ CastillosCVEP40, CastillosCVEP100, ) +from .Dreyer2023 import Dreyer2023A, Dreyer2023B, Dreyer2023C from .epfl import EPFLP300 from .fake import FakeDataset, FakeVirtualRealityDataset from .gigadb import Cho2017