diff --git a/doc/sphinxext/gen_commands.py b/doc/sphinxext/gen_commands.py index c369bba6db0..730d3e47caf 100644 --- a/doc/sphinxext/gen_commands.py +++ b/doc/sphinxext/gen_commands.py @@ -1,10 +1,12 @@ # License: BSD-3-Clause # Copyright the MNE-Python contributors. import glob +import os +import shutil from importlib import import_module from pathlib import Path -from mne.utils import ArgvSetter, _replace_md5 +from mne.utils import ArgvSetter, hashfunc def setup(app): @@ -106,6 +108,17 @@ def generate_commands_rst(app=None): _replace_md5(str(out_fname)) +def _replace_md5(fname): + """Replace a file based on MD5sum.""" + # adapted from sphinx-gallery + assert fname.endswith(".new") + fname_old = fname[:-4] + if os.path.isfile(fname_old) and hashfunc(fname) == hashfunc(fname_old): + os.remove(fname) + else: + shutil.move(fname, fname_old) + + # This is useful for testing/iterating to see what the result looks like if __name__ == "__main__": generate_commands_rst() diff --git a/mne/_fiff/reference.py b/mne/_fiff/reference.py index d0f4b08f76b..2cf15977848 100644 --- a/mne/_fiff/reference.py +++ b/mne/_fiff/reference.py @@ -25,33 +25,6 @@ from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj, setup_proj -def _copy_channel(inst, ch_name, new_ch_name): - """Add a copy of a channel specified by ch_name. - - Input data can be in the form of Raw, Epochs or Evoked. - - The instance object is modified inplace. - - Parameters - ---------- - inst : instance of Raw | Epochs | Evoked - Data containing the EEG channels - ch_name : str - Name of the channel to copy. - new_ch_name : str - Name given to the copy of the channel. - - Returns - ------- - inst : instance of Raw | Epochs | Evoked - The data with a copy of a given channel. - """ - new_inst = inst.copy().pick([ch_name]) - new_inst.rename_channels({ch_name: new_ch_name}) - inst.add_channels([new_inst], force_update_info=True) - return inst - - def _check_before_reference(inst, ref_from, ref_to, ch_type): """Prepare instance for referencing.""" # Check to see that data is preloaded diff --git a/mne/_fiff/utils.py b/mne/_fiff/utils.py index 09cc3046d6c..78cab22c8a9 100644 --- a/mne/_fiff/utils.py +++ b/mne/_fiff/utils.py @@ -301,31 +301,6 @@ def _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc): return chs -def _synthesize_stim_channel(events, n_samples): - """Synthesize a stim channel from events read from an event file. - - Parameters - ---------- - events : array, shape (n_events, 3) - Each row representing an event. - n_samples : int - The number of samples. - - Returns - ------- - stim_channel : array, shape (n_samples,) - An array containing the whole recording's event marking. 
- """ - # select events overlapping buffer - events = events.copy() - events[events[:, 1] < 1, 1] = 1 - # create output buffer - stim_channel = np.zeros(n_samples, int) - for onset, duration, trigger in events: - stim_channel[onset : onset + duration] = trigger - return stim_channel - - def _construct_bids_filename(base, ext, part_idx, validate=True): """Construct a BIDS compatible filename for split files.""" # insert index in filename diff --git a/mne/_fiff/write.py b/mne/_fiff/write.py index ea43d37562e..5604e25d2c1 100644 --- a/mne/_fiff/write.py +++ b/mne/_fiff/write.py @@ -414,21 +414,12 @@ def write_float_sparse_rcs(fid, kind, mat): return write_float_sparse(fid, kind, mat, fmt="csr") -def write_float_sparse_ccs(fid, kind, mat): - """Write a single-precision sparse compressed column matrix tag.""" - return write_float_sparse(fid, kind, mat, fmt="csc") - - def write_float_sparse(fid, kind, mat, fmt="auto"): """Write a single-precision floating-point sparse matrix tag.""" if fmt == "auto": fmt = "csr" if isinstance(mat, csr_matrix) else "csc" - if fmt == "csr": - need = csr_matrix - matrix_type = FIFF.FIFFT_SPARSE_RCS_MATRIX - else: - need = csc_matrix - matrix_type = FIFF.FIFFT_SPARSE_CCS_MATRIX + need = csr_matrix if fmt == "csr" else csc_matrix + matrix_type = getattr(FIFF, f"FIFFT_SPARSE_{fmt[-1].upper()}CS_MATRIX") _validate_type(mat, need, "sparse") matrix_type = matrix_type | FIFF.FIFFT_MATRIX | FIFF.FIFFT_FLOAT nnzm = mat.nnz diff --git a/mne/_freesurfer.py b/mne/_freesurfer.py index 52d7c24afeb..3f3a2fcbaae 100644 --- a/mne/_freesurfer.py +++ b/mne/_freesurfer.py @@ -506,23 +506,6 @@ def estimate_head_mri_t(subject, subjects_dir=None, verbose=None): return invert_transform(compute_native_head_t(montage)) -def _ensure_image_in_surface_RAS(image, subject, subjects_dir): - """Check if the image is in Freesurfer surface RAS space.""" - nib = _import_nibabel("load a volume image") - if not isinstance(image, nib.spatialimages.SpatialImage): - image = nib.load(image) - image = nib.MGHImage(image.dataobj.astype(np.float32), image.affine) - fs_img = nib.load(op.join(subjects_dir, subject, "mri", "brain.mgz")) - if not np.allclose(image.affine, fs_img.affine, atol=1e-6): - raise RuntimeError( - "The `image` is not aligned to Freesurfer " - "surface RAS space. This space is required as " - "it is the space where the anatomical " - "segmentation and reconstructed surfaces are" - ) - return image # returns MGH image for header - - def _get_affine_from_lta_info(lines): """Get the vox2ras affine from lta file info.""" volume_data = np.loadtxt([line.split("=")[1] for line in lines]) diff --git a/mne/channels/layout.py b/mne/channels/layout.py index 4b9968874b4..83df9f377d7 100644 --- a/mne/channels/layout.py +++ b/mne/channels/layout.py @@ -970,61 +970,6 @@ def _pair_grad_sensors( return picks -# this function is used to pair grad when info is not present -# it is the case of Projection that don't have the info. -def _pair_grad_sensors_ch_names_vectorview(ch_names): - """Find the indices for pairing grad channels in a Vectorview system. - - Parameters - ---------- - ch_names : list of str - A list of channel names. - - Returns - ------- - indexes : list of int - Indices of the grad channels, ordered in pairs. 
- """ - pairs = defaultdict(list) - for i, name in enumerate(ch_names): - if name.startswith("MEG"): - if name.endswith(("2", "3")): - key = name[-4:-1] - pairs[key].append(i) - - pairs = [p for p in pairs.values() if len(p) == 2] - - grad_chs = sum(pairs, []) - return grad_chs - - -# this function is used to pair grad when info is not present -# it is the case of Projection that don't have the info. -def _pair_grad_sensors_ch_names_neuromag122(ch_names): - """Find the indices for pairing grad channels in a Neuromag 122 system. - - Parameters - ---------- - ch_names : list of str - A list of channel names. - - Returns - ------- - indexes : list of int - Indices of the grad channels, ordered in pairs. - """ - pairs = defaultdict(list) - for i, name in enumerate(ch_names): - if name.startswith("MEG"): - key = (int(name[-3:]) - 1) // 2 - pairs[key].append(i) - - pairs = [p for p in pairs.values() if len(p) == 2] - - grad_chs = sum(pairs, []) - return grad_chs - - def _merge_ch_data(data, ch_type, names, method="rms"): """Merge data from channel pairs. diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py index 7f6af375ca9..706d830a566 100644 --- a/mne/channels/tests/test_montage.py +++ b/mne/channels/tests/test_montage.py @@ -87,10 +87,6 @@ locs_montage_fname = data_path / "EEGLAB" / "test_chans.locs" evoked_fname = data_path / "montage" / "level2_raw-ave.fif" eeglab_fname = data_path / "EEGLAB" / "test_raw.set" -bdf_fname1 = data_path / "BDF" / "test_generator_2.bdf" -bdf_fname2 = data_path / "BDF" / "test_bdf_stim_channel.bdf" -egi_fname1 = data_path / "EGI" / "test_egi.mff" -cnt_fname = data_path / "CNT" / "scan41_short.cnt" fnirs_dname = data_path / "NIRx" / "nirscout" / "nirx_15_2_recording_w_short" mgh70_fname = data_path / "SSS" / "mgh70_raw.fif" subjects_dir = data_path / "subjects" @@ -104,10 +100,8 @@ fif_fname = io_dir / "tests" / "data" / "test_raw.fif" edf_path = io_dir / "edf" / "tests" / "data" / "test.edf" bdf_path = io_dir / "edf" / "tests" / "data" / "test_bdf_eeglab.mat" -egi_fname2 = io_dir / "egi" / "tests" / "data" / "test_egi.raw" vhdr_path = io_dir / "brainvision" / "tests" / "data" / "test.vhdr" ctf_fif_fname = io_dir / "tests" / "data" / "test_ctf_comp_raw.fif" -nicolet_fname = io_dir / "nicolet" / "tests" / "data" / "test_nicolet_raw.data" def _make_toy_raw(n_channels): @@ -1108,17 +1102,6 @@ def test_egi_dig_montage(tmp_path): _check_roundtrip(dig_montage_in_head, fname_temp) -def _pop_montage(dig_montage, ch_name): - # remove reference that was not used in old API - name_idx = dig_montage.ch_names.index(ch_name) - dig_idx = dig_montage._get_dig_names().index(ch_name) - - del dig_montage.dig[dig_idx] - del dig_montage.ch_names[name_idx] - for k in range(dig_idx, len(dig_montage.dig)): - dig_montage.dig[k]["ident"] -= 1 - - @testing.requires_testing_data def test_read_dig_captrak(tmp_path): """Test reading a captrak montage file.""" @@ -1444,21 +1427,6 @@ def _check_roundtrip(montage, fname, coord_frame="head"): assert_dig_allclose(montage, montage_read) -def _fake_montage(ch_names): - pos = np.random.RandomState(42).randn(len(ch_names), 3) - return make_dig_montage(ch_pos=dict(zip(ch_names, pos)), coord_frame="head") - - -cnt_ignore_warns = [ - pytest.mark.filterwarnings( - "ignore:.*Could not parse meas date from the header. Setting to None." - ), - pytest.mark.filterwarnings( - "ignore:.*Could not define the number of bytes automatically. Defaulting to 2." 
- ), -] - - def test_digmontage_constructor_errors(): """Test proper error messaging.""" with pytest.raises(ValueError, match="does not match the number"): @@ -1966,7 +1934,7 @@ def test_montage_add_fiducials(): subjects_dir = data_path / "subjects" subject = "sample" fid_fname = subjects_dir / subject / "bem" / "sample-fiducials.fif" - test_fids, test_coord_frame = read_fiducials(fid_fname) + test_fids, _ = read_fiducials(fid_fname) test_fids = np.array([f["r"] for f in test_fids]) # create test montage and add estimated fiducials diff --git a/mne/chpi.py b/mne/chpi.py index 090459e5855..86ed23ce867 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -1085,12 +1085,6 @@ def _fit_chpi_quat_subset(coil_dev_rrs, coil_head_rrs, use_idx): return quat, g, np.array(out_idx, int) -@jit() -def _unit_quat_constraint(x): - """Constrain our 3 quaternion rot params (ignoring w) to have norm <= 1.""" - return 1 - (x * x).sum() - - @verbose def compute_chpi_snr( raw, t_step_min=0.01, t_window="auto", ext_order=1, tmin=0, tmax=None, verbose=None diff --git a/mne/coreg.py b/mne/coreg.py index 7de243c7874..d3feaa7b81f 100644 --- a/mne/coreg.py +++ b/mne/coreg.py @@ -760,28 +760,6 @@ def _is_mri_subject(subject, subjects_dir=None): ) -def _is_scaled_mri_subject(subject, subjects_dir=None): - """Check whether a directory in subjects_dir is a scaled mri subject. - - Parameters - ---------- - subject : str - Name of the potential subject/directory. - subjects_dir : None | path-like - Override the SUBJECTS_DIR environment variable. - - Returns - ------- - is_scaled_mri_subject : bool - Whether ``subject`` is a scaled mri subject. - """ - subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) - if not _is_mri_subject(subject, subjects_dir): - return False - fname = subjects_dir / subject / "MRI scaling parameters.cfg" - return fname.exists() - - def _mri_subject_has_bem(subject, subjects_dir=None): """Check whether an mri subject has a file matching the bem pattern. 
@@ -1479,7 +1457,6 @@ def __init__( self._scale_mode = None self._on_defects = on_defects - self._rot_trans = None self._default_parameters = np.array( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0] ) @@ -1487,7 +1464,6 @@ def __init__( self._rotation = self._default_parameters[:3] self._translation = self._default_parameters[3:6] self._scale = self._default_parameters[6:9] - self._icp_iterations = 20 self._icp_angle = 0.2 self._icp_distance = 0.2 self._icp_scale = 0.2 @@ -1869,10 +1845,6 @@ def _has_rpa_data(self): def _processed_high_res_mri_points(self): return self._get_processed_mri_points("high") - @property - def _processed_low_res_mri_points(self): - return self._get_processed_mri_points("low") - def _get_processed_mri_points(self, res): bem = self._bem_low_res if res == "low" else self._bem_high_res points = bem["rr"].copy() diff --git a/mne/decoding/time_delaying_ridge.py b/mne/decoding/time_delaying_ridge.py index 3ef2403bf34..520cb99279e 100644 --- a/mne/decoding/time_delaying_ridge.py +++ b/mne/decoding/time_delaying_ridge.py @@ -40,8 +40,9 @@ def _compute_corrs( assert X.shape[:2] == y.shape[:2] len_trf = smax - smin len_x, n_epochs, n_ch_x = X.shape - len_y, n_epcohs, n_ch_y = y.shape + len_y, n_epochs_y, n_ch_y = y.shape assert len_x == len_y + assert n_epochs == n_epochs_y n_fft = next_fast_len(2 * X.shape[0] - 1) diff --git a/mne/epochs.py b/mne/epochs.py index 43f8baf70f3..8cc2c2df386 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -4480,18 +4480,6 @@ def bootstrap(epochs, random_state=None): return epochs_bootstrap -def _check_merge_epochs(epochs_list): - """Aux function.""" - if len({tuple(epochs.event_id.items()) for epochs in epochs_list}) != 1: - raise NotImplementedError("Epochs with unequal values for event_id") - if len({epochs.tmin for epochs in epochs_list}) != 1: - raise NotImplementedError("Epochs with unequal values for tmin") - if len({epochs.tmax for epochs in epochs_list}) != 1: - raise NotImplementedError("Epochs with unequal values for tmax") - if len({epochs.baseline for epochs in epochs_list}) != 1: - raise NotImplementedError("Epochs with unequal values for baseline") - - def _concatenate_epochs( epochs_list, *, with_data=True, add_offset=True, on_mismatch="raise" ): diff --git a/mne/filter.py b/mne/filter.py index dc25776f980..88957fae043 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -38,30 +38,6 @@ _length_factors = dict(hann=3.1, hamming=3.3, blackman=5.0) -def is_power2(num): - """Test if number is a power of 2. - - Parameters - ---------- - num : int - Number. - - Returns - ------- - b : bool - True if is power of 2. - - Examples - -------- - >>> is_power2(2 ** 3) - True - >>> is_power2(5) - False - """ - num = int(num) - return num != 0 and ((num & (num - 1)) == 0) - - def next_fast_len(target): """Find the next fast size of input data to `fft`, for zero-padding, etc. 
diff --git a/mne/forward/_compute_forward.py b/mne/forward/_compute_forward.py index db62bf60152..a02a8839f4f 100644 --- a/mne/forward/_compute_forward.py +++ b/mne/forward/_compute_forward.py @@ -55,7 +55,7 @@ def _check_coil_frame(coils, coord_frame, bem): if coord_frame != FIFF.FIFFV_COORD_MRI: if coord_frame == FIFF.FIFFV_COORD_HEAD: # Make a transformed duplicate - coils, coord_Frame = _dup_coil_set(coils, coord_frame, bem["head_mri_t"]) + coils, coord_frame = _dup_coil_set(coils, coord_frame, bem["head_mri_t"]) else: raise RuntimeError(f"Bad coil coordinate frame {coord_frame}") return coils, coord_frame diff --git a/mne/gui/tests/test_coreg.py b/mne/gui/tests/test_coreg.py index aea6fba08ff..0bafc98d83a 100644 --- a/mne/gui/tests/test_coreg.py +++ b/mne/gui/tests/test_coreg.py @@ -17,14 +17,12 @@ from mne.coreg import Coregistration from mne.datasets import testing from mne.io import read_info -from mne.io.kit.tests import data_dir as kit_data_dir from mne.utils import catch_logging, get_config from mne.viz import _3d data_path = testing.data_path(download=False) raw_path = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" fname_trans = data_path / "MEG" / "sample" / "sample_audvis_trunc-trans.fif" -kit_raw_path = kit_data_dir / "test_bin_raw.fif" subjects_dir = data_path / "subjects" fid_fname = subjects_dir / "sample" / "bem" / "sample-fiducials.fif" ctf_raw_path = data_path / "CTF" / "catch-alp-good-f.ds" diff --git a/mne/inverse_sparse/mxne_optim.py b/mne/inverse_sparse/mxne_optim.py index f9142c89eab..2a7388f8686 100644 --- a/mne/inverse_sparse/mxne_optim.py +++ b/mne/inverse_sparse/mxne_optim.py @@ -1622,7 +1622,7 @@ def g_time_prime_inv(Z): w_time = 1.0 / w_time w_time[w_time < 0.0] = 0.0 - X, Z, active_set_, E_, _ = _tf_mixed_norm_solver_bcd_active_set( + X, Z, active_set_, _, _ = _tf_mixed_norm_solver_bcd_active_set( M, G[:, active_set], alpha_space, diff --git a/mne/io/base.py b/mne/io/base.py index f10228a70cf..9ff669a4e41 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -299,7 +299,6 @@ def __init__( # unit orig_units = _check_orig_units(orig_units) self._orig_units = orig_units or dict() # always a dict - self._projectors = list() self._projector = None self._dtype_ = dtype self.set_annotations(None) @@ -2478,26 +2477,6 @@ def _allocate_data(preload, shape, dtype): return data -def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False): - """Convert indices to time. - - Parameters - ---------- - index : list-like | int - List of ints or int representing points in time. - use_first_samp : boolean - If True, the time returned is relative to the session onset, else - relative to the recording onset. - - Returns - ------- - times : ndarray - Times corresponding to the index supplied. 
- """ - times = np.atleast_1d(index) + (first_samp if use_first_samp else 0) - return times / sfreq - - def _convert_slice(sel): if len(sel) and (np.diff(sel) == 1).all(): return slice(sel[0], sel[-1] + 1) @@ -2638,7 +2617,6 @@ def __init__(self): self._first_time = None self._last_time = None self._cals = None - self._rawdir = None self._projector = None @property diff --git a/mne/io/brainvision/tests/test_brainvision.py b/mne/io/brainvision/tests/test_brainvision.py index c65a3865e64..51e63fa082c 100644 --- a/mne/io/brainvision/tests/test_brainvision.py +++ b/mne/io/brainvision/tests/test_brainvision.py @@ -59,9 +59,6 @@ # Test for nanovolts as unit vhdr_units_path = data_dir / "test_units.vhdr" -# Test bad date -vhdr_bad_date = data_dir / "test_bad_date.vhdr" - eeg_bin = data_dir / "test_bin_raw.fif" eog = ["HL", "HR", "Vb"] diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py index b6a66a3e2f6..cdcf75ae294 100644 --- a/mne/io/bti/bti.py +++ b/mne/io/bti/bti.py @@ -42,9 +42,6 @@ read_uint32, ) -FIFF_INFO_DIG_FIELDS = ("kind", "ident", "r", "coord_frame") -FIFF_INFO_DIG_DEFAULTS = (None, None, None, FIFF.FIFFV_COORD_HEAD) - BTI_WH2500_REF_MAG = ("MxA", "MyA", "MzA", "MxaA", "MyaA", "MzaA") BTI_WH2500_REF_GRAD = ("GxxA", "GyyA", "GyxA", "GzaA", "GzyA") @@ -287,14 +284,14 @@ def _read_config(fname): elif kind == BTI.UB_B_COH_POINTS: dta["n_points"] = read_int32(fid) dta["status"] = read_int32(fid) - dta["points"] = [] - for pnt in range(16): - d = { + dta["points"] = [ + { "pos": read_double_matrix(fid, 1, 3), "direction": read_double_matrix(fid, 1, 3), "error": read_double(fid), } - dta["points"] += [d] + for _ in range(16) + ] elif kind == BTI.UB_B_CCP_XFM_BLOCK: dta["method"] = read_int32(fid) @@ -360,7 +357,7 @@ def _read_config(fname): ) dta["subsys"] = list() - for sub_key in range(num_subsys): + for _ in range(num_subsys): d = { "subsys_type": read_int16(fid), "subsys_num": read_int16(fid), @@ -573,7 +570,7 @@ def _read_config(fname): fid.seek(4, 1) dev["reserved"] = read_str(fid, 32) dta.update({"dev": dev, "loops": []}) - for loop in range(dev["total_loops"]): + for _ in range(dev["total_loops"]): d = { "position": read_double_matrix(fid, 1, 3), "orientation": read_double_matrix(fid, 1, 3), @@ -774,82 +771,6 @@ def _read_pfid_ed(fid): return out -def _read_coil_def(fid): - """Read coil definition.""" - coildef = { - "position": read_double_matrix(fid, 1, 3), - "orientation": read_double_matrix(fid, 1, 3), - "radius": read_double(fid), - "wire_radius": read_double(fid), - "turns": read_int16(fid), - } - - fid.seek(fid, 2, 1) - coildef["checksum"] = read_int32(fid) - coildef["reserved"] = read_str(fid, 32) - - -def _read_ch_config(fid): - """Read BTi channel config.""" - cfg = { - "name": read_str(fid, BTI.FILE_CONF_CH_NAME), - "chan_no": read_int16(fid), - "ch_type": read_uint16(fid), - "sensor_no": read_int16(fid), - } - - fid.seek(fid, BTI.FILE_CONF_CH_NEXT, 1) - - cfg.update( - { - "gain": read_float(fid), - "units_per_bit": read_float(fid), - "yaxis_label": read_str(fid, BTI.FILE_CONF_CH_YLABEL), - "aar_val": read_double(fid), - "checksum": read_int32(fid), - "reserved": read_str(fid, BTI.FILE_CONF_CH_RESERVED), - } - ) - - _correct_offset(fid) - - # Then the channel info - ch_type, chan = cfg["ch_type"], dict() - chan["dev"] = { - "size": read_int32(fid), - "checksum": read_int32(fid), - "reserved": read_str(fid, 32), - } - if ch_type in [BTI.CHTYPE_MEG, BTI.CHTYPE_REF]: - chan["loops"] = [_read_coil_def(fid) for d in range(chan["dev"]["total_loops"])] - - elif ch_type == 
BTI.CHTYPE_EEG: - chan["impedance"] = read_float(fid) - chan["padding"] = read_str(fid, BTI.FILE_CONF_CH_PADDING) - chan["transform"] = read_transform(fid) - chan["reserved"] = read_char(fid, BTI.FILE_CONF_CH_RESERVED) - - elif ch_type in [ - BTI.CHTYPE_TRIGGER, - BTI.CHTYPE_EXTERNAL, - BTI.CHTYPE_UTILITY, - BTI.CHTYPE_DERIVED, - ]: - chan["user_space_size"] = read_int32(fid) - if ch_type == BTI.CHTYPE_TRIGGER: - fid.seek(2, 1) - chan["reserved"] = read_str(fid, BTI.FILE_CONF_CH_RESERVED) - - elif ch_type == BTI.CHTYPE_SHORTED: - chan["reserved"] = read_str(fid, BTI.FILE_CONF_CH_RESERVED) - - cfg["chan"] = chan - - _correct_offset(fid) - - return cfg - - def _read_bti_header_pdf(pdf_fname): """Read header from pdf file.""" with _bti_open(pdf_fname, "rb") as fid: @@ -908,22 +829,20 @@ def _read_bti_header_pdf(pdf_fname): # actual header ends here, so dar seems ok. - info["epochs"] = [_read_epoch(fid) for epoch in range(info["total_epochs"])] + info["epochs"] = [_read_epoch(fid) for _ in range(info["total_epochs"])] - info["chs"] = [_read_channel(fid) for ch in range(info["total_chans"])] + info["chs"] = [_read_channel(fid) for _ in range(info["total_chans"])] - info["events"] = [_read_event(fid) for event in range(info["total_events"])] + info["events"] = [_read_event(fid) for _ in range(info["total_events"])] - info["processes"] = [ - _read_process(fid) for process in range(info["total_processes"]) - ] + info["processes"] = [_read_process(fid) for _ in range(info["total_processes"])] info["assocfiles"] = [ - _read_assoc_file(fid) for af in range(info["total_associated_files"]) + _read_assoc_file(fid) for _ in range(info["total_associated_files"]) ] info["edclasses"] = [ - _read_pfid_ed(fid) for ed_class in range(info["total_ed_classes"]) + _read_pfid_ed(fid) for _ in range(info["total_ed_classes"]) ] info["extra_data"] = fid.read(start - fid.tell()) diff --git a/mne/io/bti/read.py b/mne/io/bti/read.py index 6489a77850a..0ca086f3141 100644 --- a/mne/io/bti/read.py +++ b/mne/io/bti/read.py @@ -33,21 +33,6 @@ def read_char(fid, count=1): return _unpack_simple(fid, f">S{count}", "S") -def read_bool(fid): - """Read bool value from bti file.""" - return _unpack_simple(fid, ">?", bool) - - -def read_uint8(fid): - """Read unsigned 8bit integer from bti file.""" - return _unpack_simple(fid, ">u1", np.uint8) - - -def read_int8(fid): - """Read 8bit integer from bti file.""" - return _unpack_simple(fid, ">i1", np.int8) - - def read_uint16(fid): """Read unsigned 16bit integer from bti file.""" return _unpack_simple(fid, ">u2", np.uint32) @@ -68,11 +53,6 @@ def read_int32(fid): return _unpack_simple(fid, ">i4", np.int32) -def read_uint64(fid): - """Read unsigned 64bit integer from bti file.""" - return _unpack_simple(fid, ">u8", np.uint64) - - def read_int64(fid): """Read 64bit integer from bti file.""" return _unpack_simple(fid, ">u8", np.int64) diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index bc00b605ca6..de9667dc3d7 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -51,8 +51,6 @@ duplicate_channel_labels_path = data_dir / "duplicate_channel_labels.edf" edf_uneven_path = data_dir / "test_uneven_samp.edf" bdf_eeglab_path = data_dir / "test_bdf_eeglab.mat" -edf_eeglab_path = data_dir / "test_edf_eeglab.mat" -edf_uneven_eeglab_path = data_dir / "test_uneven_samp.mat" edf_stim_channel_path = data_dir / "test_edf_stim_channel.edf" edf_txt_stim_channel_path = data_dir / "test_edf_stim_channel.txt" diff --git a/mne/io/eeglab/tests/test_eeglab.py 
b/mne/io/eeglab/tests/test_eeglab.py index ebd5a6a6706..43e523de997 100644 --- a/mne/io/eeglab/tests/test_eeglab.py +++ b/mne/io/eeglab/tests/test_eeglab.py @@ -36,16 +36,13 @@ raw_fname_event_duration = base_dir / "test_raw_event_duration.set" epochs_fname_mat = base_dir / "test_epochs.set" epochs_fname_onefile_mat = base_dir / "test_epochs_onefile.set" -raw_mat_fnames = [raw_fname_mat, raw_fname_onefile_mat] epochs_mat_fnames = [epochs_fname_mat, epochs_fname_onefile_mat] raw_fname_chanloc = base_dir / "test_raw_chanloc.set" raw_fname_chanloc_fids = base_dir / "test_raw_chanloc_fids.set" raw_fname_2021 = base_dir / "test_raw_2021.set" raw_fname_h5 = base_dir / "test_raw_h5.set" -raw_fname_onefile_h5 = base_dir / "test_raw_onefile_h5.set" epochs_fname_h5 = base_dir / "test_epochs_h5.set" epochs_fname_onefile_h5 = base_dir / "test_epochs_onefile_h5.set" -raw_h5_fnames = [raw_fname_h5, raw_fname_onefile_h5] epochs_h5_fnames = [epochs_fname_h5, epochs_fname_onefile_h5] montage_path = base_dir / "test_chans.locs" diff --git a/mne/io/egi/egimff.py b/mne/io/egi/egimff.py index 6d5559a966e..b8093a5e9b5 100644 --- a/mne/io/egi/egimff.py +++ b/mne/io/egi/egimff.py @@ -201,25 +201,6 @@ def _read_mff_header(filepath): return summaryinfo -class _FixedOffset(datetime.tzinfo): - """Fixed offset in minutes east from UTC. - - Adapted from the official Python documentation. - """ - - def __init__(self, offset): - self._offset = datetime.timedelta(minutes=offset) - - def utcoffset(self, dt): - return self._offset - - def tzname(self, dt): - return "MFF" - - def dst(self, dt): - return datetime.timedelta(0) - - def _read_header(input_fname): """Obtain the headers from the file package mff. diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index dc3c732979d..f45c9cc6701 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -60,7 +60,6 @@ fif_bad_marked_fname = base_dir / "test_withbads_raw.fif" bad_file_works = base_dir / "test_bads.txt" bad_file_wrong = base_dir / "test_wrong_bads.txt" -hp_fname = base_dir / "test_chpi_raw_hp.txt" hp_fif_fname = base_dir / "test_chpi_raw_sss.fif" diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index 9a0b301087f..f11f1fa6194 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -560,6 +560,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose= sqd["nchan"] = channel_count = int(np.fromfile(fid, INT32, 1)[0]) comment = _read_name(fid, n=256) create_time, last_modified_time = np.fromfile(fid, INT32, 2) + del last_modified_time fid.seek(KIT.INT * 3, SEEK_CUR) # reserved dewar_style = np.fromfile(fid, INT32, 1)[0] fid.seek(KIT.INT * 3, SEEK_CUR) # spare @@ -575,6 +576,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose= else: adc_range = np.fromfile(fid, FLOAT64, 1)[0] adc_polarity, adc_allocated, adc_stored = np.fromfile(fid, INT32, 3) + del adc_polarity system_name = system_name.replace("\x00", "") system_name = system_name.strip().replace("\n", "/") model_name = model_name.replace("\x00", "") @@ -793,6 +795,7 @@ def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose= mri_type, meg_type, mri_done, this_meg_done = np.fromfile( fid, INT32, 4 ) + del mri_type, meg_type, mri_done meg_done[mi] = bool(this_meg_done) fid.seek(3 * KIT.DOUBLE, SEEK_CUR) # mri_pos mrk[mi] = np.fromfile(fid, FLOAT64, 3) diff --git a/mne/io/neuralynx/tests/test_neuralynx.py b/mne/io/neuralynx/tests/test_neuralynx.py index 
ceebdd3c975..09f5a93ee22 100644 --- a/mne/io/neuralynx/tests/test_neuralynx.py +++ b/mne/io/neuralynx/tests/test_neuralynx.py @@ -144,7 +144,7 @@ def test_neuralynx(): raw.ch_names == expected_chan_names ), "labels in raw.ch_names don't match expected channel names" - mne_y, mne_t = raw.get_data(return_times=True) # in V + mne_y = raw.get_data() # in V # ==== NeuralynxIO ==== # nlx_reader = NeuralynxIO(dirname=testing_path, exclude_filename=excluded_ncs_files) diff --git a/mne/io/persyst/persyst.py b/mne/io/persyst/persyst.py index d0f05893dab..535123d896d 100644 --- a/mne/io/persyst/persyst.py +++ b/mne/io/persyst/persyst.py @@ -449,6 +449,7 @@ def _process_lay_line(line, section): if section == "comments": # Persyst Comments output 5 variables "," separated time_sec, duration, state, var_type, text = line.split(",", 4) + del var_type, state status = 2 key = text value = (time_sec, duration) diff --git a/mne/io/snirf/tests/test_snirf.py b/mne/io/snirf/tests/test_snirf.py index f298a030bea..9262fa55b89 100644 --- a/mne/io/snirf/tests/test_snirf.py +++ b/mne/io/snirf/tests/test_snirf.py @@ -55,7 +55,6 @@ snirf_nirsport2_20219 = ( testing_path / "SNIRF" / "NIRx" / "NIRSport2" / "2021.9" / "2021-10-01_002.snirf" ) -nirx_nirsport2_20219 = testing_path / "NIRx" / "nirsport_v2" / "aurora_2021_9" # Kernel kernel_hb = testing_path / "SNIRF" / "Kernel" / "Flow50" / "Portal_2021_11" / "hb.snirf" diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index 4dd41914664..76a40674a1a 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -85,7 +85,6 @@ subjects_dir = test_path / "subjects" s_path_bem = subjects_dir / "sample" / "bem" fname_bem = s_path_bem / "sample-320-320-320-bem-sol.fif" -fname_bem_homog = s_path_bem / "sample-320-bem-sol.fif" src_fname = s_path_bem / "sample-oct-4-src.fif" snr = 3.0 diff --git a/mne/misc.py b/mne/misc.py index 9313f048cbc..3b362643214 100644 --- a/mne/misc.py +++ b/mne/misc.py @@ -81,19 +81,3 @@ def read_reject_parameters(fname): reject[reject_pynames[reject_names.index(words[0])]] = float(words[1]) return reject - - -def read_flat_parameters(fname): - """Read flat channel rejection parameters from .cov or .ave config file.""" - with open(fname) as f: - lines = f.readlines() - - reject_names = ["gradFlat", "magFlat", "eegFlat", "eogFlat", "ecgFlat"] - reject_pynames = ["grad", "mag", "eeg", "eog", "ecg"] - flat = dict() - for line in lines: - words = line.split() - if words[0] in reject_names: - flat[reject_pynames[reject_names.index(words[0])]] = float(words[1]) - - return flat diff --git a/mne/morph.py b/mne/morph.py index 4c987263925..1a54647c7c9 100644 --- a/mne/morph.py +++ b/mne/morph.py @@ -806,17 +806,17 @@ def _check_zooms(mri_from, zooms, zooms_src_to): return zooms -def _resample_from_to(img, affine, to_vox_map): - # Wrap to dipy for speed, equivalent to: - # from nibabel.processing import resample_from_to - # from nibabel.spatialimages import SpatialImage - # return _get_img_fdata( - # resample_from_to(SpatialImage(img, affine), to_vox_map, order=1)) - import dipy.align.imaffine - - return dipy.align.imaffine.AffineMap( - None, to_vox_map[0], to_vox_map[1], img.shape, affine - ).transform(img, resample_only=True) +# def _resample_from_to(img, affine, to_vox_map): +# # Wrap to dipy for speed, equivalent to: +# # from nibabel.processing import resample_from_to +# # from nibabel.spatialimages import SpatialImage +# # return _get_img_fdata( +# # 
resample_from_to(SpatialImage(img, affine), to_vox_map, order=1)) +# import dipy.align.imaffine +# +# return dipy.align.imaffine.AffineMap( +# None, to_vox_map[0], to_vox_map[1], img.shape, affine +# ).transform(img, resample_only=True) ############################################################################### diff --git a/mne/preprocessing/ecg.py b/mne/preprocessing/ecg.py index 2cdcd991fae..531529445e6 100644 --- a/mne/preprocessing/ecg.py +++ b/mne/preprocessing/ecg.py @@ -199,8 +199,8 @@ def find_ecg_events( ------- ecg_events : array The events corresponding to the peaks of the R waves. - ch_ecg : string - Name of channel used. + ch_ecg : int | None + Index of channel used. average_pulse : float The estimated average pulse. If no ECG events could be found, this will be zero. @@ -299,6 +299,7 @@ def find_ecg_events( event_id * np.ones(n_events, int), ] ).T + out = (ecg_events, idx_ecg, average_pulse) ecg = ecg[np.newaxis] # backward compat output 2D if return_ecg: diff --git a/mne/preprocessing/maxwell.py b/mne/preprocessing/maxwell.py index d5cf8a58b6e..1b2c7c459ef 100644 --- a/mne/preprocessing/maxwell.py +++ b/mne/preprocessing/maxwell.py @@ -1422,12 +1422,6 @@ def _sq(x): return x * x -def _check_finite(data): - """Ensure data is finite.""" - if not np.isfinite(data).all(): - raise RuntimeError("data contains non-finite numbers") - - def _sph_harm_norm(order, degree): """Compute normalization factor for spherical harmonics.""" # we could use scipy.special.poch(degree + order + 1, -2 * order) diff --git a/mne/preprocessing/tests/test_ctps.py b/mne/preprocessing/tests/test_ctps.py index 20ec189229c..d6fe23d7112 100644 --- a/mne/preprocessing/tests/test_ctps.py +++ b/mne/preprocessing/tests/test_ctps.py @@ -60,7 +60,7 @@ def test_ctps(): data = get_data(n_trials, j_extent) ks_dyn, pk_dyn, phase_trial = ctps(data) data2 = _compute_normalized_phase(data) - ks_dyn2, pk_dyn2, phase_trial2 = ctps(data2, is_raw=False) + ks_dyn2, pk_dyn2, _ = ctps(data2, is_raw=False) for a, b in zip([ks_dyn, pk_dyn, phase_trial], [ks_dyn2, pk_dyn2, data2]): assert_array_equal(a, b) assert a.min() >= 0 diff --git a/mne/preprocessing/tests/test_ecg.py b/mne/preprocessing/tests/test_ecg.py index 73fee8c38f0..44551a39965 100644 --- a/mne/preprocessing/tests/test_ecg.py +++ b/mne/preprocessing/tests/test_ecg.py @@ -36,6 +36,10 @@ def test_find_ecg(): events, ch_ECG, average_pulse, ecg = find_ecg_events( raw, event_id=999, ch_name=ch_name, tstart=tstart, return_ecg=True ) + if ch_name is None: + assert ch_ECG is None + else: + assert raw.ch_names[ch_ECG] == ch_name assert raw.n_times == ecg.shape[-1] assert 40 < average_pulse < 60 n_events = len(events) diff --git a/mne/report/report.py b/mne/report/report.py index ae66591481a..ea7319cc874 100644 --- a/mne/report/report.py +++ b/mne/report/report.py @@ -141,7 +141,6 @@ ) html_include_dir = Path(__file__).parent / "js_and_css" -template_dir = Path(__file__).parent / "templates" JAVASCRIPT = (html_include_dir / "report.js").read_text(encoding="utf-8") CSS = (html_include_dir / "report.css").read_text(encoding="utf-8") @@ -4266,19 +4265,10 @@ def _add_bem( ) -def _clean_tags(tags): - if isinstance(tags, str): - tags = (tags,) - - # Replace any whitespace characters with dashes - tags_cleaned = tuple(re.sub(r"[\s*]", "-", tag) for tag in tags) - return tags_cleaned - - def _recursive_search(path, pattern): """Auxiliary function for recursive_search of the directory.""" filtered_files = list() - for dirpath, dirnames, files in os.walk(path): + for dirpath, _, 
files in os.walk(path): for f in fnmatch.filter(files, pattern): # only the following file types are supported # this ensures equitable distribution of jobs diff --git a/mne/stats/tests/test_cluster_level.py b/mne/stats/tests/test_cluster_level.py index 693cdc66b75..03b7811fc99 100644 --- a/mne/stats/tests/test_cluster_level.py +++ b/mne/stats/tests/test_cluster_level.py @@ -255,7 +255,7 @@ def stat_fun(X, Y): ) def test_cluster_permutation_t_test(numba_conditional, stat_fun): """Test cluster level permutations T-test.""" - condition1_1d, condition2_1d, condition1_2d, condition2_2d = _get_conditions() + condition1_1d, _, condition1_2d, _ = _get_conditions() # use a very large sigma to make sure Ts are not independent for condition1, p in ((condition1_1d, 0.01), (condition1_2d, 0.01)): @@ -272,7 +272,7 @@ def test_cluster_permutation_t_test(numba_conditional, stat_fun): p_min = np.min(cluster_p_values) assert_allclose(p_min, p, atol=1e-6) - T_obs_pos, c_1, cluster_p_values_pos, _ = permutation_cluster_1samp_test( + T_obs_pos, _, cluster_p_values_pos, _ = permutation_cluster_1samp_test( condition1, n_permutations=100, tail=1, @@ -338,7 +338,7 @@ def test_cluster_permutation_with_adjacency(numba_conditional, monkeypatch): pytest.importorskip("sklearn") from sklearn.feature_extraction.image import grid_to_graph - condition1_1d, condition2_1d, condition1_2d, condition2_2d = _get_conditions() + condition1_1d, condition2_1d, _, _ = _get_conditions() n_pts = condition1_1d.shape[1] # we don't care about p-values in any of these, so do fewer permutations @@ -355,16 +355,14 @@ def test_cluster_permutation_with_adjacency(numba_conditional, monkeypatch): ) did_warn = False - for X1d, X2d, func, spatio_temporal_func in [ + for X1d, func, spatio_temporal_func in [ ( condition1_1d, - condition1_2d, permutation_cluster_1samp_test, spatio_temporal_cluster_1samp_test, ), ( [condition1_1d, condition2_1d], - [condition1_2d, condition2_2d], permutation_cluster_test, spatio_temporal_cluster_test, ), @@ -690,7 +688,7 @@ def test_spatio_temporal_cluster_adjacency(numba_conditional): assert_equal(np.sum(p_values_adj < 0.05), np.sum(p_values_no_adj < 0.05)) # make sure results are the same without buffer_size - T_obs, clusters, p_values2, hist2 = spatio_temporal_cluster_test( + T_obs, clusters, p_values2, _ = spatio_temporal_cluster_test( [data1_2d, data2_2d], n_permutations=50, tail=1, diff --git a/mne/surface.py b/mne/surface.py index 61abb3511df..8784a8a92f3 100644 --- a/mne/surface.py +++ b/mne/surface.py @@ -845,12 +845,6 @@ def _fread3(fobj): return (b1 << 16) + (b2 << 8) + b3 -def _fread3_many(fobj, n): - """Read 3-byte ints from an open binary file object.""" - b1, b2, b3 = np.fromfile(fobj, ">u1", 3 * n).reshape(-1, 3).astype(np.int64).T - return (b1 << 16) + (b2 << 8) + b3 - - def read_curvature(filepath, binary=True): """Load in curvature values from the ?h.curv file. 
diff --git a/mne/tests/test_chpi.py b/mne/tests/test_chpi.py index cb9ccc60c26..1a9aaba0024 100644 --- a/mne/tests/test_chpi.py +++ b/mne/tests/test_chpi.py @@ -55,7 +55,6 @@ base_dir = Path(__file__).parents[1] / "io" / "tests" / "data" ctf_fname = base_dir / "test_ctf_raw.fif" hp_fif_fname = base_dir / "test_chpi_raw_sss.fif" -hp_fname = base_dir / "test_chpi_raw_hp.txt" raw_fname = base_dir / "test_raw.fif" data_path = testing.data_path(download=False) diff --git a/mne/tests/test_docstring_parameters.py b/mne/tests/test_docstring_parameters.py index 9d49e0c4e76..196c981deb3 100644 --- a/mne/tests/test_docstring_parameters.py +++ b/mne/tests/test_docstring_parameters.py @@ -70,7 +70,6 @@ def _func_name(func, cls=None): "mne.fixes", "mne.io.meas_info.Info", } -char_limit = 800 # XX eventually we should probably get this lower tab_ignores = [ "mne.channels.tests.test_montage", "mne.io.curry.tests.test_curry", @@ -257,7 +256,6 @@ def test_tabs(): get_score_funcs get_version invert_transform -is_power2 is_fixed_orient make_eeg_average_ref_proj make_projector diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index ff5aca7530e..85076c0ee6d 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -4330,7 +4330,7 @@ def test_make_metadata_bounded_by_row_or_tmin_tmax_event_names(tmin, tmax): raw.set_annotations(annots) events, event_id = mne.events_from_annotations(raw=raw) - metadata, events_new, event_id_new = mne.epochs.make_metadata( + metadata, events_new, _ = mne.epochs.make_metadata( events=events, event_id=event_id, tmin=tmin, diff --git a/mne/tests/test_event.py b/mne/tests/test_event.py index c51b4eaed44..a5651c1b365 100644 --- a/mne/tests/test_event.py +++ b/mne/tests/test_event.py @@ -45,8 +45,6 @@ fname_raw = base_dir / "test_raw.fif" fname_gz = base_dir / "test-eve.fif.gz" fname_1 = base_dir / "test-1-eve.fif" -fname_txt = base_dir / "test-eve.eve" -fname_txt_1 = base_dir / "test-eve-1.eve" fname_c_annot = base_dir / "test_raw-annot.fif" # for testing Elekta averager diff --git a/mne/tests/test_label.py b/mne/tests/test_label.py index 01d934417e2..a87fae0d45e 100644 --- a/mne/tests/test_label.py +++ b/mne/tests/test_label.py @@ -57,7 +57,6 @@ src_fname = subjects_dir / "sample" / "bem" / "sample-oct-6-src.fif" stc_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-lh.stc" real_label_fname = data_path / "MEG" / "sample" / "labels" / "Aud-lh.label" -real_label_rh_fname = data_path / "MEG" / "sample" / "labels" / "Aud-rh.label" v1_label_fname = subjects_dir / "sample" / "label" / "lh.V1.label" fwd_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg-eeg-oct-6-fwd.fif" @@ -66,7 +65,6 @@ test_path = Path(__file__).parents[1] / "io" / "tests" / "data" label_fname = test_path / "test-lh.label" -label_rh_fname = test_path / "test-rh.label" # This code was used to generate the "fake" test labels: # for hemi in ['lh', 'rh']: diff --git a/mne/tests/test_line_endings.py b/mne/tests/test_line_endings.py index c055ef41667..e7ce9540454 100644 --- a/mne/tests/test_line_endings.py +++ b/mne/tests/test_line_endings.py @@ -54,7 +54,7 @@ def _assert_line_endings(dir_): ".cov", ".label", ) - for dirpath, dirnames, filenames in os.walk(dir_): + for dirpath, _, filenames in os.walk(dir_): for fname in filenames: if op.splitext(fname)[1] not in good_exts or fname in skip_files: continue diff --git a/mne/tests/test_morph.py b/mne/tests/test_morph.py index 90b9f99382a..db88aaa74be 100644 --- a/mne/tests/test_morph.py +++ b/mne/tests/test_morph.py @@ -55,7 
+55,6 @@ fname_smorph = sample_dir / "sample_audvis_trunc-meg" fname_t1 = subjects_dir / "sample" / "mri" / "T1.mgz" fname_vol = subjects_dir / "sample" / "bem" / "sample-volume-7mm-src.fif" -fname_brain = subjects_dir / "sample" / "mri" / "brain.mgz" fname_aseg = subjects_dir / "sample" / "mri" / "aseg.mgz" fname_fs_vol = subjects_dir / "fsaverage" / "bem" / "fsaverage-vol7-nointerp-src.fif.gz" fname_aseg_fs = subjects_dir / "fsaverage" / "mri" / "aseg.mgz" diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index af638effc57..2fcdc818c45 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -1462,7 +1462,7 @@ def objective(x): want_nn /= np.linalg.norm(want_nn, axis=1, keepdims=True) stc = VolVectorSourceEstimate(data, [np.arange(n_src)], 0, 1) - stc_max, directions = stc.project("pca") + _, directions = stc.project("pca") flips = np.sign(np.sum(directions * want_nn, axis=1, keepdims=True)) directions *= flips assert_allclose(directions, want_nn, atol=2e-6) @@ -1523,9 +1523,6 @@ def invs(): return evoked, free, free_surf, freeish, fixed, fixedish -bad_normal = pytest.param("normal", marks=pytest.mark.xfail(raises=AssertionError)) - - @pytest.mark.parametrize("pick_ori", [None, "normal", "vector"]) def test_vec_stc_inv_free(invs, pick_ori): """Test vector STC behavior with two free-orientation inverses.""" diff --git a/mne/tests/test_transforms.py b/mne/tests/test_transforms.py index ef6433d951d..62329a88d25 100644 --- a/mne/tests/test_transforms.py +++ b/mne/tests/test_transforms.py @@ -503,6 +503,7 @@ def test_fit_matched_points(quats, scaling, do_scale): fro = rng.randn(10, 3) translation = rng.randn(3) for qi, quat in enumerate(quats): + print(qi) to = scaling * np.dot(quat_to_rot(quat), fro.T).T + translation for corrupted in (False, True): # mess up a point diff --git a/mne/time_frequency/csd.py b/mne/time_frequency/csd.py index 327d6a2aa68..9392cc2f2b4 100644 --- a/mne/time_frequency/csd.py +++ b/mne/time_frequency/csd.py @@ -1007,7 +1007,7 @@ def csd_array_multitaper( n_times = len(times) n_fft = n_times if n_fft is None else n_fft - window_fun, eigvals, mt_adaptive = _compute_mt_params( + window_fun, eigvals, adaptive = _compute_mt_params( n_times, sfreq, bandwidth, low_bias, adaptive ) diff --git a/mne/time_frequency/tests/test_multitaper.py b/mne/time_frequency/tests/test_multitaper.py index 9ff16159bf4..5f6982f7311 100644 --- a/mne/time_frequency/tests/test_multitaper.py +++ b/mne/time_frequency/tests/test_multitaper.py @@ -53,6 +53,7 @@ def test_multitaper_psd(n_times, adaptive, n_jobs): ) # compare assert_array_almost_equal(psd, psd_ni, decimal=4) + del freqs, freqs_ni # assert_array_equal(freqs, freqs_ni) # ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑ # this is commented out because nitime's freq calculations differ from ours diff --git a/mne/transforms.py b/mne/transforms.py index 3fa582dbe5f..c8ed015fec8 100644 --- a/mne/transforms.py +++ b/mne/transforms.py @@ -1183,7 +1183,6 @@ def fit( destination += dest_center # 6. 
Compute TPS warp of matched points from smoothed surfaces self._warp = _TPSWarp().fit(source, destination, reg) - self._matched = np.array([source, destination]) logger.info("[done]") return self diff --git a/mne/utils/__init__.pyi b/mne/utils/__init__.pyi index 54dc5272c37..9fd15a6f1c7 100644 --- a/mne/utils/__init__.pyi +++ b/mne/utils/__init__.pyi @@ -7,7 +7,6 @@ __all__ = [ "ExtendedTimeMixin", "GetEpochsMixin", "ProgressBar", - "SilenceStdout", "SizeMixin", "TimeMixin", "_DefaultEventParser", @@ -101,7 +100,6 @@ __all__ = [ "_reg_pinv", "_reject_data_segments", "_repeated_svd", - "_replace_md5", "_require_version", "_resource_path", "_safe_input", @@ -133,7 +131,6 @@ __all__ = [ "check_random_state", "check_version", "compute_corr", - "copy_base_doc_to_subclass_doc", "copy_doc", "copy_function_doc_to_method_doc", "create_slices", @@ -205,7 +202,6 @@ from ._logging import ( ) from ._testing import ( ArgvSetter, - SilenceStdout, _click_ch_name, _raw_annot, _TempDir, @@ -301,7 +297,6 @@ from .dataframe import ( ) from .docs import ( _doc_special_members, - copy_base_doc_to_subclass_doc, copy_doc, copy_function_doc_to_method_doc, deprecated, @@ -370,7 +365,6 @@ from .numerics import ( _mask_to_onsets_offsets, _reg_pinv, _reject_data_segments, - _replace_md5, _ReuseCycle, _scaled_array, _stamp_to_dt, diff --git a/mne/utils/_testing.py b/mne/utils/_testing.py index b60c3d0df05..729769a2829 100644 --- a/mne/utils/_testing.py +++ b/mne/utils/_testing.py @@ -10,7 +10,6 @@ import tempfile import traceback from functools import wraps -from io import StringIO from shutil import rmtree from unittest import SkipTest @@ -141,23 +140,6 @@ def __exit__(self, *args): # noqa: D105 sys.stderr = self.orig_stderr -class SilenceStdout: - """Silence stdout.""" - - def __init__(self, close=True): - self.close = close - - def __enter__(self): # noqa: D105 - self.stdout = sys.stdout - sys.stdout = StringIO() - return sys.stdout - - def __exit__(self, *args): # noqa: D105 - if self.close: - sys.stdout.close() - sys.stdout = self.stdout - - def has_mne_c(): """Check for MNE-C.""" return "MNE_ROOT" in os.environ diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 17eb07552d8..f1747a7f626 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -5203,43 +5203,6 @@ def wrapper(func): return wrapper -def copy_base_doc_to_subclass_doc(subclass): - """Use the docstring from a parent class methods in derived class. - - The docstring of a parent class method is prepended to the - docstring of the method of the class wrapped by this decorator. - - Parameters - ---------- - subclass : wrapped class - Class to copy the docstring to. - - Returns - ------- - subclass : Derived class - The decorated class with copied docstrings. - """ - ancestors = subclass.mro()[1:-1] - - for source in ancestors: - methodList = [ - method for method in dir(source) if callable(getattr(source, method)) - ] - for method_name in methodList: - # discard private methods - if method_name[0] == "_": - continue - base_method = getattr(source, method_name) - sub_method = getattr(subclass, method_name) - if base_method is not None and sub_method is not None: - doc = base_method.__doc__ - if sub_method.__doc__ is not None: - doc += "\n" + sub_method.__doc__ - sub_method.__doc__ = doc - - return subclass - - def linkcode_resolve(domain, info): """Determine the URL corresponding to a Python object. 
diff --git a/mne/utils/numerics.py b/mne/utils/numerics.py index 9dbb17fa485..afbf87a0adc 100644 --- a/mne/utils/numerics.py +++ b/mne/utils/numerics.py @@ -8,8 +8,6 @@ import inspect import numbers import operator -import os -import shutil import sys from contextlib import contextmanager from datetime import datetime, timedelta, timezone @@ -64,19 +62,6 @@ def array_split_idx(ary, indices_or_sections, axis=0, n_per_split=1): return zip(idx_split, ary_split) -def create_chunks(sequence, size): - """Generate chunks from a sequence. - - Parameters - ---------- - sequence : iterable - Any iterable object - size : int - The chunksize to be returned - """ - return (sequence[p : p + size] for p in range(0, len(sequence), size)) - - def sum_squared(X): """Compute norm of an array. @@ -433,17 +418,6 @@ def hashfunc(fname, block_size=1048576, hash_type="md5"): # 2 ** 20 return hasher.hexdigest() -def _replace_md5(fname): - """Replace a file based on MD5sum.""" - # adapted from sphinx-gallery - assert fname.endswith(".new") - fname_old = fname[:-4] - if os.path.isfile(fname_old) and hashfunc(fname) == hashfunc(fname_old): - os.remove(fname) - else: - shutil.move(fname, fname_old) - - def create_slices(start, stop, step=None, length=1): """Generate slices of time indexes. diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 207385bb07c..55d0cd79b63 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -540,10 +540,7 @@ def setup_time_viewer(self, time_viewer=True, show_traces=True): self.default_smoothing_range = [-1, 15] # Default configuration - self.playback = False self.visibility = False - self.refresh_rate_ms = max(int(round(1000.0 / 60.0)), 1) - self.default_scaling_range = [0.2, 2.0] self.default_playback_speed_range = [0.01, 1] self.default_playback_speed_value = 0.01 self.default_status_bar_msg = "Press ? 
for help" @@ -551,7 +548,6 @@ def setup_time_viewer(self, time_viewer=True, show_traces=True): "stc": ["mean", "max"], "src": ["mean_flip", "pca_flip", "auto"], } - self.default_trace_modes = ("vertex", "label") self.annot = None self.label_extract_mode = None all_keys = ("lh", "rh", "vol") @@ -752,30 +748,6 @@ def _configure_scalar_bar(self): self._scalar_bar.SetWidth(0.05) self._scalar_bar.SetPosition(0.02, 0.2) - def _configure_dock_time_widget(self, layout=None): - len_time = len(self._data["time"]) - 1 - if len_time < 1: - return - layout = self._renderer.dock_layout if layout is None else layout - hlayout = self._renderer._dock_add_layout(vertical=False) - self.widgets["min_time"] = self._renderer._dock_add_label( - value="-", layout=hlayout - ) - self._renderer._dock_add_stretch(hlayout) - self.widgets["current_time"] = self._renderer._dock_add_label( - value="x", layout=hlayout - ) - self._renderer._dock_add_stretch(hlayout) - self.widgets["max_time"] = self._renderer._dock_add_label( - value="+", layout=hlayout - ) - self._renderer._layout_add_widget(layout, hlayout) - min_time = float(self._data["time"][0]) - max_time = float(self._data["time"][-1]) - self.widgets["min_time"].set_value(f"{min_time: .3f}") - self.widgets["max_time"].set_value(f"{max_time: .3f}") - self.widgets["current_time"].set_value(f"{self._current_time: .3f}") - def _configure_dock_playback_widget(self, name): len_time = len(self._data["time"]) - 1 diff --git a/mne/viz/_brain/surface.py b/mne/viz/_brain/surface.py index 272123fa687..884045888d9 100644 --- a/mne/viz/_brain/surface.py +++ b/mne/viz/_brain/surface.py @@ -95,7 +95,6 @@ def __init__( self.coords = None self.curv = None self.faces = None - self.grey_curv = None self.nn = None self.labels = dict() self.x_dir = x_dir @@ -178,14 +177,6 @@ def load_curvature(self): if path.isfile(curv_path): self.curv = read_curvature(curv_path, binary=False) self.bin_curv = np.array(self.curv > 0, np.int64) - color = (self.curv > 0).astype(float) else: self.curv = None self.bin_curv = None - color = np.ones(self.coords.shape[0]) - # morphometry (curvature) normalization in order to get gray cortex - # TODO: delete self.grey_curv after cortex parameter - # will be fully supported - color = 0.5 - (color - 0.5) / 3 - color = color[:, np.newaxis] * [1, 1, 1] - self.grey_curv = color diff --git a/mne/viz/_mpl_figure.py b/mne/viz/_mpl_figure.py index 702a38b2319..08e6a2e8aca 100644 --- a/mne/viz/_mpl_figure.py +++ b/mne/viz/_mpl_figure.py @@ -1100,7 +1100,7 @@ def _create_annotation_fig(self): checkbox.on_clicked(self._toggle_draggable_annotations) fig.mne.drag_checkbox = checkbox # reposition & resize axes - width_in, height_in = fig.get_size_inches() + width_in, _ = fig.get_size_inches() width_ax = fig._inch_to_rel( width_in - ANNOTATION_FIG_CHECKBOX_COLUMN_W - 3 * ANNOTATION_FIG_PAD ) diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index da061b0a35b..4f395344f41 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -1086,13 +1086,6 @@ def _to_pos(azimuth, elevation): return x, y, z -def _mat_to_array(vtk_mat): - e = [vtk_mat.GetElement(i, j) for i in range(4) for j in range(4)] - arr = np.array(e, dtype=float) - arr.shape = (4, 4) - return arr - - def _3d_to_2d(plotter, xyz): # https://vtk.org/Wiki/VTK/Examples/Cxx/Utilities/Coordinate coordinate = vtkCoordinate() diff --git a/mne/viz/backends/renderer.py b/mne/viz/backends/renderer.py index 510d8b99fc4..22ad863a38b 100644 --- a/mne/viz/backends/renderer.py +++ 
b/mne/viz/backends/renderer.py @@ -405,7 +405,6 @@ def _enable_time_interaction( self._times = times self._init_time = current_time_func() self._init_playback_speed = init_playback_speed - self._playback_speed_range = playback_speed_range if not hasattr(self, "_dock"): self._dock_initialize() diff --git a/mne/viz/misc.py b/mne/viz/misc.py index b072e4ff183..cf9687483ac 100644 --- a/mne/viz/misc.py +++ b/mne/viz/misc.py @@ -1020,12 +1020,6 @@ def _get_flim(flim, fscale, freq, sfreq=None): return flim -def _check_fscale(fscale): - """Check for valid fscale.""" - if not isinstance(fscale, str) or fscale not in ("log", "linear"): - raise ValueError(f'fscale must be "log" or "linear", got {fscale}') - - _DEFAULT_ALIM = (-80, 10) diff --git a/mne/viz/tests/test_montage.py b/mne/viz/tests/test_montage.py index 332ca82a6a4..adf818b2651 100644 --- a/mne/viz/tests/test_montage.py +++ b/mne/viz/tests/test_montage.py @@ -19,7 +19,6 @@ elp = p_dir / "test_elp.txt" hsp = p_dir / "test_hsp.txt" hpi = p_dir / "test_mrk.sqd" -point_names = ["nasion", "lpa", "rpa", "1", "2", "3", "4", "5"] io_dir = Path(__file__).parents[2] / "io" / "tests" / "data" fif_fname = io_dir / "test_raw.fif" diff --git a/mne/viz/utils.py b/mne/viz/utils.py index c4e02c55c61..d95a728f8f9 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -680,12 +680,6 @@ def _show_help_fig(col1, col2, fig_help, ax, show): pass -def _show_help(col1, col2, width, height): - fig_help = figure_nobar(figsize=(width, height), dpi=80) - ax = fig_help.add_subplot(111) - _show_help_fig(col1, col2, fig_help, ax, show=True) - - def _key_press(event): """Handle key press in dialog.""" import matplotlib.pyplot as plt diff --git a/pyproject.toml b/pyproject.toml index 78615dc5568..9c7f4e04fb8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -388,7 +388,14 @@ verify_pr_number = true changelog_skip_label = "no-changelog-entry-needed" [tool.vulture] -min_confidence = 70 -paths = ["mne", "tools/vulture_allowlist.py"] -sort_by_size = true +min_confidence = 60 +paths = ['mne', 'tools/vulture_allowlist.py'] +ignore_decorators = ['@observe'] +exclude = [ + 'conftest.py', + 'constants.py', + 'mne/viz/backends/_qt.py', + 'mne/viz/backends/_abstract.py', + 'mne/viz/backends/_notebook.py', +] verbose = false diff --git a/tools/vulture_allowlist.py b/tools/vulture_allowlist.py index c0ac3317e09..370a9685a51 100644 --- a/tools/vulture_allowlist.py +++ b/tools/vulture_allowlist.py @@ -12,12 +12,126 @@ startdir pg_backend recwarn -verbose_debug -few_surfaces +pytestmark +nbexec disabled_event_channels +ch_subset_adjacency +few_surfaces +want_orig_dist +eeglab_montage +invisible_fig +captions_new +comments_new +items_new +has_imageio_ffmpeg +has_pyvista +f4 +set_channel_types_eyetrack +_use_test_3d_backend +verbose_debug metadata_routing -# Others -exc_value +# Backward compat or rarely used +RawFIF +estimate_head_mri_t +plot_epochs_psd_topomap +plot_epochs_psd +plot_psd_topomap +plot_raw_psd_topo +plot_psd_topo +read_ctf_comp +read_bad_channels +set_cache_dir +spatial_dist_adjacency +set_cuda_device +eegbci.standardize +_.plot_topo_image +_._get_tags +_.mahalanobis exc_type -estimate_head_mri_t # imported for backward compat +exc_value + +# Unused but for compat or CIs +fit_params_ # search_light +format_epilog # false alarm for opt parser +_._fit_transform # in getattr +_.plot_3d # not tested for all classes +_.error_norm # cov +_download_all_example_data # CIs +_cleanup_agg +_notebook_vtk_works +_.drop_inds_ + +# mne/io/snirf/tests/test_snirf.py +_.dataTimeSeries 
+_.sourceIndex
+_.detectorIndex
+_.wavelengthIndex
+_.dataType
+_.dataTypeIndex
+_.dataTypeLabel
+_.SubjectID
+_.MeasurementDate
+_.MeasurementTime
+_.LengthUnit
+_.TimeUnit
+_.FrequencyUnit
+_.wavelengths
+_.sourcePos3D
+_.detectorPos3D
+
+# numerics.py
+_.noise_variance_
+_.n_features_
+
+# Brain, Coreg, PyVista
+_._Iren
+_.active_scalars_name
+_.active_vectors_name
+_._plotter
+_.set_fmax
+_.set_fmid
+_.set_fmin
+_.EnterEvent
+_.MouseMoveEvent
+_.LeaveEvent
+_.SetEventInformation
+_.CharEvent
+_.KeyPressEvent
+_.KeyReleaseEvent
+_PyVistaRenderer
+_TimeInteraction
+set_3d_options
+_._has_lpa_data
+_._has_nasion_data
+_._has_rpa_data
+_._nearest_transformed_high_res_mri_idx_rpa
+_._nearest_transformed_high_res_mri_idx_nasion
+_._nearest_transformed_high_res_mri_idx_lpa
+
+# Figures (prevent GC for example)
+_.decim_data
+_.button_help
+_.button_proj
+_.mne_animation
+_.RS
+_.showNormal
+_.showFullScreen
+_.isFullScreen
+_._span_selector
+ypress
+scroll
+keypress
+azim
+_loc
+eventson
+_.argtypes
+_.restype
+_.labelpad
+_.fake_keypress
+
+# Used in ignored files
+_qt_raise_window
+_qt_disable_paint
+_qt_get_stylesheet