Updating the .pre-commit-config.yaml to fix CI. #330

Merged
merged 3 commits on Feb 8, 2023
Changes from all commits
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml 100644 → 100755
@@ -24,12 +24,12 @@ repos:
       - id: mixed-line-ending

   - repo: https://github.com/psf/black
-    rev: 22.12.0
+    rev: 23.1.0
     hooks:
       - id: black

   - repo: https://github.com/PyCQA/isort
-    rev: 5.11.4
+    rev: 5.12.0
     hooks:
       - id: isort

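The two version bumps above are presumably what unblocks CI: black moves from 22.12.0 to 23.1.0 (the first release of the 2023 stable style) and isort from 5.11.4 to 5.12.0. One effect of black's 2023 style is that a blank line placed directly after a block opener (a def, class, for, with, elif, ...) is now removed, and that single rule accounts for every remaining deletion in this diff. A minimal before/after sketch with a hypothetical function, not taken from the MOABB sources:

# Hypothetical example, not MOABB code.

# black 22.12.0 left the blank line after the def alone:
def get_subject_data(subject, subject_list):

    if subject not in subject_list:
        raise ValueError("Invalid subject number")
    return subject


# black 23.1.0 (2023 stable style) removes it:
def get_subject_data(subject, subject_list):
    if subject not in subject_list:
        raise ValueError("Invalid subject number")
    return subject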
1 change: 0 additions & 1 deletion moabb/analysis/results.py
@@ -176,7 +176,6 @@ def to_dataframe(self, pipelines=None):

         with h5py.File(self.filepath, "r") as f:
             for digest, p_group in f.items():
-
                 # skip if not in pipeline list
                 if (pipelines is not None) & (digest not in digests):
                     continue
3 changes: 0 additions & 3 deletions moabb/datasets/epfl.py
@@ -82,7 +82,6 @@ def __init__(self):
         )

     def _get_single_run_data(self, file_path):
-
         # data from the .mat
         data = loadmat(file_path)
         signals = data["data"]
@@ -157,7 +156,6 @@ def _get_single_subject_data(self, subject):
         sessions = {}

         for file_path in sorted(file_path_list):
-
             session_name = "session_" + file_path.split(os.sep)[-2].replace("session", "")

             if session_name not in sessions.keys():
@@ -171,7 +169,6 @@ def _get_single_subject_data(self, subject):
     def data_path(
         self, subject, path=None, force_update=False, update_path=None, verbose=None
     ):
-
         if subject not in self.subject_list:
             raise (ValueError("Invalid subject number"))

1 change: 0 additions & 1 deletion moabb/datasets/fake.py
@@ -52,7 +52,6 @@ def __init__(
         )

     def _get_single_subject_data(self, subject):
-
         data = dict()
         for session in range(self.n_sessions):
             data[f"session_{session}"] = {
1 change: 0 additions & 1 deletion moabb/datasets/huebner_llp.py
@@ -20,7 +20,6 @@ class _BaseVisualMatrixSpellerDataset(BaseDataset, ABC):
     def __init__(
         self, src_url, n_subjects, raw_slice_offset, use_blocks_as_sessions=True, **kwargs
     ):
-
         self.n_channels = 31  # all channels except 5 times x_* CH and EOGvu
         if kwargs["interval"] is None:
             # "Epochs were windowed to [−200, 700] ms relative to the stimulus onset [...]."
1 change: 0 additions & 1 deletion moabb/datasets/sosulski2019.py
@@ -163,7 +163,6 @@ def _get_single_subject_data(self, subject):
     def data_path(
         self, subject, path=None, force_update=False, update_path=None, verbose=None
     ):
-
         if subject not in self.subject_list:
             raise (ValueError("Invalid subject number"))

1 change: 0 additions & 1 deletion moabb/datasets/ssvep_exo.py
@@ -81,7 +81,6 @@ def _get_single_subject_data(self, subject):
     def data_path(
         self, subject, path=None, force_update=False, update_path=None, verbose=None
     ):
-
         runs = {s + 1: n for s, n in enumerate([2] * 6 + [3] + [2] * 2 + [4, 2, 5])}

         if subject not in self.subject_list:
3 changes: 0 additions & 3 deletions moabb/evaluations/evaluations.py
@@ -603,7 +603,6 @@ def _grid_search(self, param_grid, name_grid, name, clf, pipelines, X, y, cv, gr
             return pipelines[name]

         elif param_grid is not None and os.path.isdir(name_grid):
-
             search = joblib.load(os.path.join(name_grid, "Grid_Search_CrossSubject.pkl"))

             pipelines[name].set_params(**search.best_params_)
@@ -645,7 +644,6 @@ def evaluate(self, dataset, pipelines, param_grid):

         # Implement Grid Search
         for name, clf in pipelines.items():
-
             name_grid = os.path.join(
                 str(self.hdf5_path), "GridSearch_CrossSubject", dataset.code, name
             )
@@ -660,7 +658,6 @@ def evaluate(self, dataset, pipelines, param_grid):
             total=n_subjects,
             desc=f"{dataset.code}-CrossSubject",
         ):
-
             subject = groups[test[0]]
             # now we can check if this subject has results
             run_pipes = self.results.not_yet_computed(pipelines, dataset, subject)
4 changes: 0 additions & 4 deletions moabb/pipelines/utils.py
@@ -137,15 +137,13 @@ def generate_paradigms(pipeline_configs, context=None, logger=log):
     context = context or {}
     paradigms = OrderedDict()
     for config in pipeline_configs:
-
         if "paradigms" not in config.keys():
             logger.error("{} must have a 'paradigms' key.".format(config))
             continue

         # iterate over paradigms

         for paradigm in config["paradigms"]:
-
             # check if it is in the context parameters file
             if len(context) > 0:
                 if paradigm not in context.keys():
@@ -174,11 +172,9 @@ def generate_paradigms(pipeline_configs, context=None, logger=log):


 def generate_param_grid(pipeline_configs, context=None, logger=log):
-
     context = context or {}
     param_grid = {}
     for config in pipeline_configs:
-
         if "paradigms" not in config:
             logger.error("{} must have a 'paradigms' key.".format(config))
             continue
1 change: 0 additions & 1 deletion moabb/tests/datasets.py
@@ -33,7 +33,6 @@ def test_fake_dataset(self):
         n_runs = 2

         for paradigm in ["imagery", "p300", "ssvep"]:
-
             ds = FakeDataset(
                 n_sessions=n_sessions,
                 n_runs=n_runs,
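A quick way to double-check the new behaviour locally is black's Python API; the sketch below assumes black>=23.1.0 is installed and uses black.format_str with the default black.Mode. Running pre-commit run --all-files against the updated config would reproduce the same edits across the whole repository.

# Sketch: verify that the 2023 stable style drops the blank line that
# directly follows a block opener. Assumes black>=23.1.0 is installed.
import black

src = "def f(x):\n\n    return x + 1\n"
print(black.format_str(src, mode=black.Mode()))
# Expected output:
# def f(x):
#     return x + 1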