diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index ac9e853c8..d232dc7c6 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -13,13 +13,13 @@ jobs:
     steps:
       ## Install Braindecode
       - name: Checking Out Repository
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
       # Cache MNE Data
       # The cache key here is fixed except for os
       # so if you download a new mne dataset in the code, best to manually increment the key below
       - name: Create/Restore MNE Data Cache
         id: cache-mne_data
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
           path: ~/mne_data
           key: ${{ runner.os }}-v3
@@ -38,7 +38,7 @@
       - run: python -c "import braindecode; print(braindecode.__version__)"

       - name: Checking Out Repository
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
       - name: Create Docs
         run: |
           cd docs
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index be234d936..996fd7ba0 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -15,17 +15,17 @@ jobs:
       fail-fast: false
       matrix:
         os: [ "ubuntu-latest", "macos-latest", "windows-latest" ]
-        python-version: ["3.8", "3.9", "3.10"]
+        python-version: ["3.8", "3.9", "3.10", "3.11"]
     steps:
       ## Install Braindecode
       - name: Checking Out Repository
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
      # Cache MNE Data
      # The cache key here is fixed except for os
      # so if you download a new mne dataset in the code, best to manually increment the key below
      - name: Create/Restore MNE Data Cache
        id: cache-mne_data
-        uses: actions/cache@v2
+        uses: actions/cache@v3
        with:
          path: ~/mne_data
          key: ${{ runner.os }}-v3
diff --git a/docs/whats_new.rst b/docs/whats_new.rst
index 9aa8215e1..e4c028f73 100644
--- a/docs/whats_new.rst
+++ b/docs/whats_new.rst
@@ -57,6 +57,7 @@ Enhancements
 - Add basic training example with MNE epochs (:gh:`539` by `Pierre Guetschel`_)
 - Log validation accuracy in :class:`braindecode.EEGClassifier` (:gh:`541` by `Pierre Guetschel`_)
 - Better type hints in :mod:`braindecode.augmentation.base` (:gh:`551` by `Valentin Iovene`_)
+- Support for MOABB 1.0.0 and switch to pyproject.toml (:gh:`553` by `Sylvain Chevallier`_)

 Bugs
 ~~~~
diff --git a/examples/advanced_training/plot_data_augmentation.py b/examples/advanced_training/plot_data_augmentation.py
index 03552c967..36da3656a 100644
--- a/examples/advanced_training/plot_data_augmentation.py
+++ b/examples/advanced_training/plot_data_augmentation.py
@@ -89,9 +89,8 @@
 #

 splitted = windows_dataset.split('session')
-train_set = splitted['session_T']
-valid_set = splitted['session_E']
-
+train_set = splitted['0train']  # Session train
+valid_set = splitted['1test']  # Session evaluation
 ######################################################################
 # Defining a Transform
 # --------------------
diff --git a/examples/advanced_training/plot_data_augmentation_search.py b/examples/advanced_training/plot_data_augmentation_search.py
index 0cf463c37..fdbc517a4 100644
--- a/examples/advanced_training/plot_data_augmentation_search.py
+++ b/examples/advanced_training/plot_data_augmentation_search.py
@@ -123,8 +123,8 @@

 splitted = windows_dataset.split('session')
-train_set = splitted['session_T']
-eval_set = splitted['session_E']
+train_set = splitted['0train']  # Session train
+eval_set = splitted['1test']  # Session evaluation


 ######################################################################
 # Defining a list of transforms
diff --git a/examples/datasets_io/plot_split_dataset.py b/examples/datasets_io/plot_split_dataset.py
index 77f473356..0318ffd17 100644
--- a/examples/datasets_io/plot_split_dataset.py
+++ b/examples/datasets_io/plot_split_dataset.py
@@ -48,7 +48,7 @@

 splits = dataset.split("run")
 print(splits)
-splits["run_4"].description
+splits["4"].description

 ###############################################################################
 # By row index
diff --git a/examples/model_building/plot_bcic_iv_2a_moabb_cropped.py b/examples/model_building/plot_bcic_iv_2a_moabb_cropped.py
index 93e2c12d5..080c01df9 100644
--- a/examples/model_building/plot_bcic_iv_2a_moabb_cropped.py
+++ b/examples/model_building/plot_bcic_iv_2a_moabb_cropped.py
@@ -88,7 +88,7 @@
 from braindecode.datasets import MOABBDataset

 subject_id = 3
-dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=[subject_id])
+dataset = MOABBDataset(dataset_name="BNCI2014_001", subject_ids=[subject_id])


 from numpy import multiply
@@ -107,17 +107,19 @@
 factor = 1e6

 preprocessors = [
-    Preprocessor('pick_types', eeg=True, meg=False, stim=False),  # Keep EEG sensors
+    # Keep EEG sensors
+    Preprocessor('pick_types', eeg=True, meg=False, stim=False),
     Preprocessor(lambda data: multiply(data, factor)),  # Convert from V to uV
-    Preprocessor('filter', l_freq=low_cut_hz, h_freq=high_cut_hz),  # Bandpass filter
-    Preprocessor(exponential_moving_standardize,  # Exponential moving standardization
+    # Bandpass filter
+    Preprocessor('filter', l_freq=low_cut_hz, h_freq=high_cut_hz),
+    # Exponential moving standardization
+    Preprocessor(exponential_moving_standardize,
                  factor_new=factor_new, init_block_size=init_block_size)
 ]

 # Transform the data
 preprocess(dataset, preprocessors, n_jobs=-1)

-
 ######################################################################
 # Create model and compute windowing parameters
 # ---------------------------------------------
@@ -135,7 +137,6 @@

 input_window_samples = 1000

-
 ######################################################################
 # Now we create the model. To enable it to be used in cropped decoding
 # efficiently, we manually set the length of the final convolution layer
@@ -181,7 +182,6 @@
 if cuda:
     _ = model.cuda()

-
 ######################################################################
 # And now we transform model with strides to a model that outputs dense
 # prediction, so we can use it to obtain predictions for all
@@ -189,7 +189,6 @@
 #
 model.to_dense_prediction_model()

-
 ######################################################################
 # To know the models’ output shape without the last layer, we calculate the
 # shape of model output for a dummy input.
@@ -227,7 +226,6 @@
     preload=True
 )

-
 ######################################################################
 # Split the dataset
 # -----------------
@@ -236,9 +234,8 @@
 #

 splitted = windows_dataset.split('session')
-train_set = splitted['session_T']
-valid_set = splitted['session_E']
-
+train_set = splitted['0train']  # Session train
+valid_set = splitted['1test']  # Session evaluation

 ######################################################################
 # Training
@@ -285,7 +282,8 @@
     iterator_train__shuffle=True,
     batch_size=batch_size,
     callbacks=[
-        "accuracy", ("lr_scheduler", LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),
+        "accuracy",
+        ("lr_scheduler", LRScheduler('CosineAnnealingLR', T_max=n_epochs - 1)),
     ],
     device=device,
     classes=classes,
@@ -294,7 +292,6 @@
 # in the dataset.
 _ = clf.fit(train_set, y=None, epochs=n_epochs)

-
 ######################################################################
 # Plot Results
 # ----------------
@@ -309,7 +306,8 @@
 from matplotlib.lines import Line2D

 # Extract loss and accuracy values for plotting from history object
-results_columns = ['train_loss', 'valid_loss', 'train_accuracy', 'valid_accuracy']
+results_columns = ['train_loss', 'valid_loss', 'train_accuracy',
+                   'valid_accuracy']
 df = pd.DataFrame(clf.history[:, results_columns], columns=results_columns,
                   index=clf.history[:, 'epoch'])

@@ -319,7 +317,8 @@

 fig, ax1 = plt.subplots(figsize=(8, 3))
 df.loc[:, ['train_loss', 'valid_loss']].plot(
-    ax=ax1, style=['-', ':'], marker='o', color='tab:blue', legend=False, fontsize=14)
+    ax=ax1, style=['-', ':'], marker='o', color='tab:blue', legend=False,
+    fontsize=14)

 ax1.tick_params(axis='y', labelcolor='tab:blue', labelsize=14)
 ax1.set_ylabel("Loss", color='tab:blue', fontsize=14)
@@ -335,12 +334,13 @@

 # where some data has already been plotted to ax
 handles = []
-handles.append(Line2D([0], [0], color='black', linewidth=1, linestyle='-', label='Train'))
-handles.append(Line2D([0], [0], color='black', linewidth=1, linestyle=':', label='Valid'))
+handles.append(
+    Line2D([0], [0], color='black', linewidth=1, linestyle='-', label='Train'))
+handles.append(
+    Line2D([0], [0], color='black', linewidth=1, linestyle=':', label='Valid'))
 plt.legend(handles, [h.get_label() for h in handles], fontsize=14)
 plt.tight_layout()

-
 ######################################################################
 # Plot Confusion Matrix
 # ---------------------
diff --git a/examples/model_building/plot_bcic_iv_2a_moabb_trial.py b/examples/model_building/plot_bcic_iv_2a_moabb_trial.py
index 7e143f432..3a636ed56 100644
--- a/examples/model_building/plot_bcic_iv_2a_moabb_trial.py
+++ b/examples/model_building/plot_bcic_iv_2a_moabb_trial.py
@@ -39,7 +39,7 @@
 from braindecode.datasets import MOABBDataset

 subject_id = 3
-dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=[subject_id])
+dataset = MOABBDataset(dataset_name="BNCI2014_001", subject_ids=[subject_id])


 ######################################################################
@@ -131,12 +131,12 @@
 ######################################################################
 # We can easily split the dataset using additional info stored in the
 # description attribute, in this case ``session`` column. We select
-# ``session_T`` for training and ``session_E`` for validation.
+# ``0train`` for training and ``1test`` for validation.
 #

 splitted = windows_dataset.split('session')
-train_set = splitted['session_T']
-valid_set = splitted['session_E']
+train_set = splitted['0train']  # Session train
+valid_set = splitted['1test']  # Session evaluation


 ######################################################################
diff --git a/examples/model_building/plot_how_train_test_and_tune.py b/examples/model_building/plot_how_train_test_and_tune.py
index f99ca88fe..43cbfa902 100644
--- a/examples/model_building/plot_how_train_test_and_tune.py
+++ b/examples/model_building/plot_how_train_test_and_tune.py
@@ -181,7 +181,7 @@
 ######################################################################
-# We can easily split the dataset BCIC IV 2a dataset using additional
+# We can easily split the BCIC IV 2a dataset using additional
 # info stored in the description attribute, in this case the ``session``
-# column. We select ``session_T`` for training and ``session_E`` for testing.
-# For other datasets, you might have to choose another column and/or column.
+# column. We select ``0train`` for training and ``1test`` for testing.
+# For other datasets, you might have to choose another column.
 #
 # .. note::
@@ -192,8 +192,8 @@
 #

 splitted = windows_dataset.split("session")
-train_set = splitted["session_T"]
-test_set = splitted["session_E"]
+train_set = splitted['0train']  # Session train
+test_set = splitted['1test']  # Session evaluation


 ######################################################################
@@ -412,7 +412,7 @@ def plot_simple_train_test(ax, all_dataset, train_set, test_set):
 def plot_train_valid_test(ax, all_dataset, train_subset, val_subset, test_set):
     """Create a sample plot for training, validation, testing."""
-    bd_cmap = ["#3A6190", "#683E00", "#2196F3", "#DDF2FF",]
+    bd_cmap = ["#3A6190", "#683E00", "#2196F3", "#DDF2FF", ]

     n_train, n_val, n_test = len(train_subset), len(val_subset), len(test_set)
     ax.barh("Original\ndataset", len(all_dataset), left=0, height=0.5,
             color=bd_cmap[0])
@@ -508,7 +508,7 @@ def plot_train_valid_test(ax, all_dataset, train_subset, val_subset, test_set):
 def plot_k_fold(ax, cv, all_dataset, X_train, y_train, test_set):
     """Create a sample plot for training, validation, testing."""
-    bd_cmap = ["#3A6190", "#683E00", "#2196F3", "#DDF2FF",]
+    bd_cmap = ["#3A6190", "#683E00", "#2196F3", "#DDF2FF", ]

     ax.barh("Original\nDataset", len(all_dataset), left=0, height=0.5,
             color=bd_cmap[0])

diff --git a/examples/model_building/plot_hyperparameter_tuning_with_scikit-learn.py b/examples/model_building/plot_hyperparameter_tuning_with_scikit-learn.py
index ef64e4ca9..e0cc40ad4 100644
--- a/examples/model_building/plot_hyperparameter_tuning_with_scikit-learn.py
+++ b/examples/model_building/plot_hyperparameter_tuning_with_scikit-learn.py
@@ -172,12 +172,12 @@
 ######################################################################
 # We can easily split the dataset using additional info stored in the
 # description attribute, in this case ``session`` column. We select
-# ``session_T`` for training and ``session_E`` for evaluation.
+# ``0train`` for training and ``1test`` for evaluation.
 #

 splitted = windows_dataset.split('session')
-train_set = splitted['session_T']
-eval_set = splitted['session_E']
+train_set = splitted['0train']  # Session train
+eval_set = splitted['1test']  # Session evaluation

 ######################################################################
 # Create model
diff --git a/examples/model_building/plot_train_in_pure_pytorch_and_pytorch_lightning.py b/examples/model_building/plot_train_in_pure_pytorch_and_pytorch_lightning.py
index fb3129cfa..80fa0e9c3 100644
--- a/examples/model_building/plot_train_in_pure_pytorch_and_pytorch_lightning.py
+++ b/examples/model_building/plot_train_in_pure_pytorch_and_pytorch_lightning.py
@@ -73,7 +73,7 @@
 from braindecode.datasets import MOABBDataset

 subject_id = 3
-dataset = MOABBDataset(dataset_name="BNCI2014001", subject_ids=[subject_id])
+dataset = MOABBDataset(dataset_name="BNCI2014_001", subject_ids=[subject_id])

 ######################################################################
 # Preprocessing, the offline transformation of the raw dataset
@@ -185,7 +185,7 @@
 ######################################################################
 # We can easily split the dataset using additional info stored in the
 # description attribute, in this case the ``session`` column. We
-# select ``session_T`` for training and ``session_E`` for testing.
+# select ``0train`` for training and ``1test`` for testing.
 # For other datasets, you might have to choose another column.
 #
 # .. note::
@@ -196,8 +196,8 @@
 #

 splitted = windows_dataset.split("session")
-train_set = splitted["session_T"]
-test_set = splitted["session_E"]
+train_set = splitted['0train']  # Session train
+test_set = splitted['1test']  # Session evaluation

 ######################################################################
 # Option 1: Pure PyTorch training loop
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..94420d76e
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,84 @@
+[project]
+name = "braindecode"
+version = "0.7.0"
+description = "Deep learning software to decode EEG, ECG or MEG signals"
+authors = [{name = "Robin Tibor Schirrmeister", email = "robintibor@gmail.com"}]
+maintainers = [
+    {name = "Alexandre Gramfort", email = "agramfort@meta.com"},
+    {name = "Bruno Aristimunha Pinto", email = "b.aristimunha@gmail.com"},
+    {name = "Robin Tibor Schirrmeister", email = "robintibor@gmail.com"}
+]
+license = {text = "BSD-3-Clause"}
+keywords = [
+    "python",
+    "deep-learning",
+    "neuroscience",
+    "pytorch",
+    "meg",
+    "eeg",
+    "neuroimaging",
+    "electroencephalography",
+    "magnetoencephalography",
+    "electrocorticography",
+    "ecog",
+    "electroencephalogram"
+]
+classifiers = [
+    'Development Status :: 3 - Alpha',
+    "Intended Audience :: Developers",
+    "Intended Audience :: Science/Research",
+    'Topic :: Software Development :: Build Tools',
+    "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    'License :: OSI Approved :: BSD License',
+    'Programming Language :: Python :: 3.8',
+]
+
+requires-python = ">=3.8"
+dependencies = [
+    'mne',
+    'numpy',
+    'pandas',
+    'scipy',
+    'matplotlib',
+    'h5py',
+    'skorch',
+    'torch',
+    'einops',
+    'joblib',
+    'torchinfo',
+    'docstring_inheritance'
+]
+
+[project.optional-dependencies]
+moabb = ["moabb >= 1.0.0"]
+tests = [
+    'pytest',
+    'pytest-cov',
+    'codecov',
+    'pytest_cases'
+]
+docs = [
+    'sphinx_gallery',
+    'sphinx_rtd_theme',
+    'pydata_sphinx_theme',
+    'numpydoc',
+    'memory_profiler',
+    'pillow',
+    'ipython',
+    'sphinx_design',
+    'lightning',
+    'seaborn'
+]
+
+[build-system]
+requires = ["setuptools"]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools]
+py-modules = []
+
+[tool.setuptools.packages.find]
+where = ["."]  # list of folders that contain the packages (["."] by default)
+include = ["braindecode"]  # package names should match these glob patterns (["*"] by default)
+exclude = []  # exclude packages matching these glob patterns (empty by default)
+namespaces = false  # to disable scanning PEP 420 namespaces (true by default)
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 570feea90..000000000
--- a/requirements.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-numpy
-pandas
-scipy
-matplotlib
-h5py
-skorch
-joblib
-mne
-einops
-docstring_inheritance
-torchinfo
-pytest_cases
diff --git a/setup.cfg b/setup.cfg
index fc275b45f..9c44901c3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -12,7 +12,7 @@ release = egg_info -RDb ''
 # tag_build = .dev

 [bdist_rpm]
-doc-files = doc
+doc_files = doc

 [flake8]
 max-line-length = 100
diff --git a/setup.py b/setup.py
deleted file mode 100644
index dba2c835a..000000000
--- a/setup.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from setuptools import setup, find_packages  # Always prefer setuptools over distutils
-from codecs import open  # To use a consistent encoding
-from os import path
-
-here = path.abspath(path.dirname(__file__))
-
-# Get the long description from the relevant file
-with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
-    long_description = f.read()
-
-# This will add __version__ to version dict
-version = {}
-with open(path.join(here, 'braindecode/version.py'), encoding='utf-8') as (
-        version_file):
-    exec(version_file.read(), version)
-
-setup(
-    name='Braindecode',
-
-    version=version['__version__'],
-
-    description='A deep learning toolbox to decode raw time-domain EEG.',
-    long_description=long_description,
-
-    # The project's main homepage.
-    url='https://github.com/braindecode/braindecode',
-
-    # Author details
-    author='Robin Tibor Schirrmeister',
-    author_email='robintibor@gmail.com',
-
-    # Choose your license
-    license='BSD 3-Clause',
-
-    install_requires=['mne', 'numpy', 'pandas', 'scipy', 'matplotlib', 'h5py', 'skorch',
-                      'torch', 'einops', 'joblib', 'docstring_inheritance', 'torchinfo'],
-    extras_require={
-        'moabb': ['moabb'],
-        'tests': ['pytest', 'pytest-cov', 'codecov', 'pytest_cases'],
-        'docs': ['sphinx_gallery', 'sphinx_rtd_theme', 'pydata_sphinx_theme', 'numpydoc',
-                 'memory_profiler', 'pillow', 'ipython', 'sphinx_design', 'docstring_inheritance',
-                 'lightning', 'seaborn'],
-    },
-    # tests_require = [...]
-
-    # See https://PyPI.python.org/PyPI?%3Aaction=list_classifiers
-    classifiers=[
-        'Development Status :: 3 - Alpha',
-
-        # Indicate who your project is intended for
-        "Intended Audience :: Developers",
-        "Intended Audience :: Science/Research",
-        'Topic :: Software Development :: Build Tools',
-
-        "Topic :: Scientific/Engineering :: Artificial Intelligence",
-
-        # Pick your license as you wish (should match "license" above)
-        'License :: OSI Approved :: BSD License',
-
-        # Specify the Python versions you support here. In particular, ensure
-        # that you indicate whether you support Python 2, Python 3 or both.
-        'Programming Language :: Python :: 3.8',
-    ],
-
-    # What does your project relate to?
-    keywords='eeg deep-learning brain-state-decoding',
-
-    packages=find_packages(),
-    include_package_data=False,
-    zip_safe=False,
-)
diff --git a/test/unit_tests/datasets/test_dataset.py b/test/unit_tests/datasets/test_dataset.py
index 19908b8c3..76d5b9721 100644
--- a/test/unit_tests/datasets/test_dataset.py
+++ b/test/unit_tests/datasets/test_dataset.py
@@ -202,8 +202,8 @@ def test_split_dataset(concat_ds_targets):
     splits = concat_ds.split(split_ids)
     assert len(splits) == len(split_ids)
     assert splits.keys() == split_ids.keys()
-    assert (splits["train"].description["run"] == "run_1").all()
-    assert (splits["test"].description["run"] == "run_2").all()
+    assert (splits["train"].description["run"] == "1").all()
+    assert (splits["test"].description["run"] == "2").all()


 def test_metadata(concat_windows_dataset):
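
To make the migration concrete for readers updating their own scripts, below is a minimal sketch of the renaming this patch moves the examples to. It assumes braindecode installed with the moabb extra (i.e. MOABB >= 1.0.0), network access to download BNCI2014_001, and zero window offsets for brevity; the examples above derive the real offsets from the recording's sampling rate.

    from braindecode.datasets import MOABBDataset
    from braindecode.preprocessing import create_windows_from_events

    # MOABB 1.0.0 renames the dataset ("BNCI2014001" -> "BNCI2014_001")
    # and its session labels ("session_T"/"session_E" -> "0train"/"1test").
    dataset = MOABBDataset(dataset_name="BNCI2014_001", subject_ids=[3])

    # Cut the continuous recordings into trial windows before splitting
    # (offsets of zero are a simplification for this sketch).
    windows_dataset = create_windows_from_events(
        dataset,
        trial_start_offset_samples=0,
        trial_stop_offset_samples=0,
        preload=True,
    )

    # Split on the 'session' column of the description DataFrame;
    # the resulting dict keys follow the new MOABB naming.
    splitted = windows_dataset.split('session')
    print(list(splitted))  # expected: ['0train', '1test']

    train_set = splitted['0train']  # training session
    valid_set = splitted['1test']   # evaluation session

Run names change the same way, dropping their prefix ('run_4' becomes '4'), which is what the plot_split_dataset.py and test_dataset.py updates above reflect.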