Skip to content

Running into errors while trying to sort tetrode data by group #3825

@nightshade2303

Description

@nightshade2303

Hello, we are a new lab and are just starting our first few recordings. I wanted to get used to the spikeinterface environment, so I have been using it to load our data and attempt to sort it. However, I have run into the following issue, whose cause I could not trace:

# Load the continuous Rhythm stream and the event stream from the Open Ephys folder.
recording = se.read_openephys(data_path, stream_name = 'Record Node 101#OE_FPGA_Acquisition_Board-100.Rhythm Data')
events = se.read_openephys_event(data_path)
print(recording)

# Keep only the 64 ephys channels (CH1..CH64), dropping AUX/ADC streams.
selection = [f"CH{chan_num}" for chan_num in range(1, 65)]
ephys_recording = recording.select_channels(selection)

# Channel map: 1-based Open Ephys channel numbers, one row per tetrode
# (16 tetrodes x 4 channels = 64 channels).
_tetrode_rows = [
    (40, 38, 36, 34),
    (48, 46, 44, 42),
    (56, 54, 52, 50),
    (58, 64, 62, 60),
    (63, 61, 59, 57),
    (55, 53, 51, 49),
    (47, 45, 43, 41),
    (39, 37, 35, 33),
    (25, 27, 29, 31),
    (17, 19, 21, 23),
    (9, 11, 13, 15),
    (1, 3, 5, 7),
    (4, 6, 8, 2),
    (10, 12, 14, 16),
    (18, 20, 22, 24),
    (26, 28, 30, 32),
]
channelmapoe = [chan for row in _tetrode_rows for chan in row]

# Shift to 0-based indices to match Python/NumPy indexing.
channelmappythonic = np.asarray(channelmapoe) - 1

# Build a ProbeGroup of 16 tetrodes and attach it to the recording so the
# sorter can treat each tetrode as its own channel group.
probegroup = ProbeGroup()
for tetrode_idx in range(16):  # 16 tetrodes in this implant
    probe = generate_tetrode()
    # Spread tetrodes far apart so their geometries never overlap.
    probe.move([tetrode_idx * 300, 0])
    probegroup.add_probe(probe)

# Map device channels onto tetrode contacts using the channel map above.
probegroup.set_global_device_channel_indices(channelmappythonic)
ephys_recording = ephys_recording.set_probegroup(probegroup, group_mode='by_probe')

# Drop channels known to be bad (1-based channel numbers -> 'CH<n>' ids).
bad_channel_ids = [48, 39, 33, 21, 12, 13, 15, 3, 5, 7]
bad_channel_ids = [f"CH{chan_num}" for chan_num in bad_channel_ids]
ephys_recording = ephys_recording.remove_channels(remove_channel_ids=bad_channel_ids)
# group-wise preprocessing

from spikeinterface import aggregate_channels
# (optional) automatic bad-channel detection, kept for reference:
# bad_channel_ids, info = spre.detect_bad_channels(ephys_recording)
# print(bad_channel_ids, info)
# ephys_recording = ephys_recording.remove_channels(bad_channel_ids)

# Split the recording into one sub-recording per tetrode group, bandpass
# filter and median-reference each group in isolation, then stitch the
# groups back together into a single recording.
split_recording_dict = ephys_recording.split_by("group")

preprocessed_recordings = [
    spre.common_reference(
        spre.bandpass_filter(group_rec, freq_min=300, freq_max=6000),
        operator='median',
    )
    for group_rec in split_recording_dict.values()
]

combined_preprocessed_recording = aggregate_channels(preprocessed_recordings)

# Run tridesclous2 once per channel group ("group" property), sequentially
# via joblib with a single worker.
# NOTE(review): the reported TypeError ('recording_list' kwarg) is raised
# while spikeinterface re-loads the aggregated recording from JSON inside
# the sorter — presumably a version mismatch in the installed
# spikeinterface, not a problem with these arguments; verify the
# installed version against the release notes.
_sorter_kwargs = dict(
    sorter_name = 'tridesclous2',
    recording = combined_preprocessed_recording,
    grouping_property = "group",
    folder = fr'L:\4portProb_ephys\Box1_ephys\Bayleef\sorted\{folder_name}_sorted_by_group_1',
    engine = "joblib",
    verbose = True, 
    engine_kwargs = {"n_jobs": 1},
)
aggregate_sorting = ss.run_sorter_by_property(**_sorter_kwargs)

But I keep getting the following error:

---------------------------------------------------------------------------
SpikeSortingError                         Traceback (most recent call last)
Cell In[11], line 26
     23 combined_preprocessed_recording = aggregate_channels(preprocessed_recordings)
     25 # only for si.__version__ == 0.102.0
---> 26 aggregate_sorting = ss.run_sorter_by_property(
     27     sorter_name = 'tridesclous2',
     28     recording = combined_preprocessed_recording,
     29     grouping_property = "group",
     30     folder = fr'L:\4portProb_ephys\Box1_ephys\Bayleef\sorted\{folder_name}_sorted_by_group_1',
     31     engine = "joblib",
     32     verbose = True, 
     33     engine_kwargs = {"n_jobs": 1}
     34 )
     35 # sorting = ss.run_sorter(
     36 #     sorter_name = 'mountainsort5',
     37 #     filter = True,
   (...)     41 #     folder = f'{folder_name}_sorted6'
     42 # )

File c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\sorters\launcher.py:308, in run_sorter_by_property(sorter_name, recording, grouping_property, folder, mode_if_folder_exists, engine, engine_kwargs, verbose, docker_image, singularity_image, working_folder, **sorter_params)
    297     job = dict(
    298         sorter_name=sorter_name,
    299         recording=rec,
   (...)    304         **sorter_params,
    305     )
    306     job_list.append(job)
--> 308 sorting_list = run_sorter_jobs(job_list, engine=engine, engine_kwargs=engine_kwargs, return_output=True)
    310 unit_groups = []
    311 for sorting, group in zip(sorting_list, recording_dict.keys()):

File c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\sorters\launcher.py:115, in run_sorter_jobs(job_list, engine, engine_kwargs, return_output)
    113 n_jobs = engine_kwargs["n_jobs"]
    114 backend = engine_kwargs["backend"]
--> 115 sortings = Parallel(n_jobs=n_jobs, backend=backend)(delayed(run_sorter)(**kwargs) for kwargs in job_list)
    116 if return_output:
    117     out.extend(sortings)

File c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\joblib\parallel.py:1918, in Parallel.__call__(self, iterable)
   1916     output = self._get_sequential_output(iterable)
   1917     next(output)
-> 1918     return output if self.return_generator else list(output)
   1920 # Let's create an ID that uniquely identifies the current call. If the
   1921 # call is interrupted early and that the same instance is immediately
   1922 # re-used, this id will be used to prevent workers that were
   1923 # concurrently finalizing a task from the previous call to run the
   1924 # callback.
   1925 with self._lock:

File c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\joblib\parallel.py:1847, in Parallel._get_sequential_output(self, iterable)
   1845 self.n_dispatched_batches += 1
   1846 self.n_dispatched_tasks += 1
-> 1847 res = func(*args, **kwargs)
   1848 self.n_completed_tasks += 1
   1849 self.print_progress()

File c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\sorters\runsorter.py:199, in run_sorter(sorter_name, recording, folder, remove_existing_folder, delete_output_folder, verbose, raise_error, docker_image, singularity_image, delete_container_files, with_output, output_folder, **sorter_params)
    188             raise RuntimeError(
    189                 "The python `spython` package must be installed to "
    190                 "run singularity. Install with `pip install spython`"
    191             )
    193     return run_sorter_container(
    194         container_image=container_image,
    195         mode=mode,
    196         **common_kwargs,
    197     )
--> 199 return run_sorter_local(**common_kwargs)

File c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\sorters\runsorter.py:261, in run_sorter_local(sorter_name, recording, folder, remove_existing_folder, delete_output_folder, verbose, raise_error, with_output, output_folder, **sorter_params)
    259 SorterClass.setup_recording(recording, folder, verbose=verbose)
    260 # This NEEDS to happen in the docker because of dependencies
--> 261 SorterClass.run_from_folder(folder, raise_error, verbose)
    262 if with_output:
    263     sorting = SorterClass.get_result_from_folder(folder, register_recording=True, sorting_info=True)

File c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\sorters\basesorter.py:310, in BaseSorter.run_from_folder(cls, output_folder, raise_error, verbose)
    307         print(f"{sorter_name} run time {run_time:0.2f}s")
    309 if has_error and raise_error:
--> 310     raise SpikeSortingError(
    311         f"Spike sorting error trace:\n{error_log_to_display}\n"
    312         f"Spike sorting failed. You can inspect the runtime trace in {output_folder}/spikeinterface_log.json."
    313     )
    315 return run_time

SpikeSortingError: Spike sorting error trace:
Traceback (most recent call last):
  File "c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\sorters\basesorter.py", line 270, in run_from_folder
    SorterClass._run_from_folder(sorter_output_folder, sorter_params, verbose)
  File "c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\sorters\internal\tridesclous2.py", line 107, in _run_from_folder
    recording_raw = cls.load_recording_from_folder(sorter_output_folder.parent, with_warnings=False)
                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\sorters\basesorter.py", line 221, in load_recording_from_folder
    recording = load(json_file, base_folder=output_folder)
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\core\loading.py", line 100, in load
    return _load_object_from_dict(d, object_type, base_folder=base_folder)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\core\loading.py", line 175, in _load_object_from_dict
    return BaseExtractor.from_dict(d, base_folder=base_folder)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\core\base.py", line 571, in from_dict
    extractor = _load_extractor_from_dict(dictionary)
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\core\base.py", line 1108, in _load_extractor_from_dict
    new_kwargs[name] = _load_extractor_from_dict(value)
                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "c:\Users\dlab\miniforge3\envs\si_env_rolling_new\Lib\site-packages\spikeinterface\core\base.py", line 1127, in _load_extractor_from_dict
    extractor = extractor_class(**new_kwargs)
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: ChannelsAggregationRecording.__init__() got an unexpected keyword argument 'recording_list'

Spike sorting failed. You can inspect the runtime trace in L:\4portProb_ephys\Box1_ephys\Bayleef\sorted\Bayleef_2025-01-30_11-53-57_010_sorted_by_group_1\0/spikeinterface_log.json.

If anyone can help me out on this I'll be really grateful. Thanks!!

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels
    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions