From 1ce0aa5c5563dd3f5b30074be0249e079e2a8a61 Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Wed, 12 Nov 2025 17:58:33 +0100
Subject: [PATCH 01/12] Caching the norms

---
 .../postprocessing/template_similarity.py     | 46 +++++++++++--------
 1 file changed, 27 insertions(+), 19 deletions(-)

diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py
index 91923521f1..d24ac6b8e9 100644
--- a/src/spikeinterface/postprocessing/template_similarity.py
+++ b/src/spikeinterface/postprocessing/template_similarity.py
@@ -223,6 +223,13 @@ def _compute_similarity_matrix_numpy(
     # We can use the fact that dist[i,j] at lag t is equal to dist[j,i] at time -t
     # So the matrix can be computed only for negative lags and be transposed
 
+    if method == "l1":
+        norms = np.sum(np.abs(templates_array), axis=(1))
+    elif method == "l2":
+        norms = np.sum(templates_array**2, axis=(1))
+    elif method == "cosine":
+        norms = np.sum(templates_array**2, axis=(1))
+
     if same_array:
         # optimisation when array are the same because of symetry in shift
         shift_loop = range(-num_shifts, 1)
@@ -244,20 +251,22 @@ def _compute_similarity_matrix_numpy(
                     continue
                 src = src_template[:, local_mask[j]].reshape(1, -1)
                 tgt = (tgt_templates[gcount][:, local_mask[j]]).reshape(1, -1)
+
+                local_norms = norms[:, local_mask[j]]
 
                 if method == "l1":
-                    norm_i = np.sum(np.abs(src))
-                    norm_j = np.sum(np.abs(tgt))
+                    norm_i = local_norms[i].sum()
+                    norm_j = local_norms[j].sum()
                     distances[count, i, j] = np.sum(np.abs(src - tgt))
                     distances[count, i, j] /= norm_i + norm_j
                 elif method == "l2":
-                    norm_i = np.linalg.norm(src, ord=2)
-                    norm_j = np.linalg.norm(tgt, ord=2)
+                    norm_i = np.sqrt(local_norms[i].sum())
+                    norm_j = np.sqrt(local_norms[j].sum())
                     distances[count, i, j] = np.linalg.norm(src - tgt, ord=2)
                     distances[count, i, j] /= norm_i + norm_j
                 elif method == "cosine":
-                    norm_i = np.linalg.norm(src, ord=2)
-                    norm_j = np.linalg.norm(tgt, ord=2)
+                    norm_i = np.sqrt(local_norms[i].sum())
+                    norm_j = np.sqrt(local_norms[j].sum())
                     distances[count, i, j] = np.sum(src * tgt)
                     distances[count, i, j] /= norm_i * norm_j
                     distances[count, i, j] = 1 - distances[count, i, j]
@@ -294,14 +303,17 @@ def _compute_similarity_matrix_numba(
         # optimisation when array are the same because of symetry in shift
         shift_loop = list(range(-num_shifts, 1))
     else:
-        shift_loop = list(range(-num_shifts, num_shifts + 1))
+        shift_loop = list(range(-num_shifts, num_shifts + 1)) 
 
     if method == "l1":
         metric = 0
+        norms = np.sum(np.abs(templates_array), axis=(1))
     elif method == "l2":
         metric = 1
+        norms = np.sum(templates_array**2, axis=(1))
    elif method == "cosine":
         metric = 2
+        norms = np.sum(templates_array**2, axis=(1))
 
     for count in range(len(shift_loop)):
         shift = shift_loop[count]
@@ -341,30 +353,26 @@ def _compute_similarity_matrix_numba(
                 norm_j = 0
                 distances[count, i, j] = 0
 
+                local_norms = norms[:, local_mask[j]]
+
                 for k in range(len(src)):
                     if metric == 0:
-                        norm_i += abs(src[k])
-                        norm_j += abs(tgt[k])
                         distances[count, i, j] += abs(src[k] - tgt[k])
                     elif metric == 1:
-                        norm_i += src[k] ** 2
-                        norm_j += tgt[k] ** 2
                         distances[count, i, j] += (src[k] - tgt[k]) ** 2
                     elif metric == 2:
                         distances[count, i, j] += src[k] * tgt[k]
-                        norm_i += src[k] ** 2
-                        norm_j += tgt[k] ** 2
-
+
                 if metric == 0:
-                    distances[count, i, j] /= norm_i + norm_j
+                    distances[count, i, j] /= local_norms[i].sum() + local_norms[j].sum()
                 elif metric == 1:
-                    norm_i = sqrt(norm_i)
-                    norm_j = sqrt(norm_j)
+                    norm_i = sqrt(local_norms[i].sum())
+                    norm_j = sqrt(local_norms[j].sum())
                     distances[count, i, j] = sqrt(distances[count, i, j])
                     distances[count, i, j] /= norm_i + norm_j
                 elif metric == 2:
-                    norm_i = sqrt(norm_i)
-                    norm_j = sqrt(norm_j)
+                    norm_i = sqrt(local_norms[i].sum())
+                    norm_j = sqrt(local_norms[j].sum())
                     distances[count, i, j] /= norm_i * norm_j
                     distances[count, i, j] = 1 - distances[count, i, j]

From 254b7a35ba2329abebb783f70b464d4b1c49a158 Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Wed, 12 Nov 2025 20:59:15 +0100
Subject: [PATCH 02/12] Use sparsity internally while getting templates in
 analyzer

---
 src/spikeinterface/core/analyzer_extension_core.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/spikeinterface/core/analyzer_extension_core.py b/src/spikeinterface/core/analyzer_extension_core.py
index fea3f3618e..6efda66644 100644
--- a/src/spikeinterface/core/analyzer_extension_core.py
+++ b/src/spikeinterface/core/analyzer_extension_core.py
@@ -445,6 +445,7 @@ def _run(self, verbose=False, **job_kwargs):
             self.nafter,
             return_in_uV=return_in_uV,
             return_std=return_std,
+            sparsity_mask=None if self.sparsity is None else self.sparsity.mask,
             verbose=verbose,
             **job_kwargs,
         )

From 13679f6ae96093db3ab66180de9b2ab3a69c07ae Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Wed, 12 Nov 2025 21:53:56 +0100
Subject: [PATCH 03/12] WIP

---
 src/spikeinterface/core/analyzer_extension_core.py | 12 ++++++++++++
 src/spikeinterface/core/template_tools.py          |  5 ++---
 .../postprocessing/template_similarity.py          |  9 +++++++++
 3 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/src/spikeinterface/core/analyzer_extension_core.py b/src/spikeinterface/core/analyzer_extension_core.py
index 6efda66644..a01102b261 100644
--- a/src/spikeinterface/core/analyzer_extension_core.py
+++ b/src/spikeinterface/core/analyzer_extension_core.py
@@ -624,6 +624,18 @@ def _get_data(self, operator="average", percentile=None, outputs="numpy"):
 
         templates_array = self.data[key]
 
+        if self.sparsity is not None:
+            # For consistency, we always return dense templates even if sparsity is used
+            dense_templates_array = np.zeros((templates_array.shape[0],
+                                              templates_array.shape[1],
+                                              self.sorting_analyzer.get_num_channels()),
+                                              dtype=templates_array.dtype)
+            for unit_index, unit_id in enumerate(self.sorting_analyzer.unit_ids):
+                chan_inds = self.sparsity.unit_id_to_channel_indices[unit_id]
+                dense_templates_array[unit_index][:, chan_inds] = templates_array[unit_index, :, :chan_inds.size]
+            templates_array = dense_templates_array
+
+
         if outputs == "numpy":
             return templates_array
         elif outputs == "Templates":
diff --git a/src/spikeinterface/core/template_tools.py b/src/spikeinterface/core/template_tools.py
index 834d70e41b..81ab985fd7 100644
--- a/src/spikeinterface/core/template_tools.py
+++ b/src/spikeinterface/core/template_tools.py
@@ -35,16 +35,15 @@ def get_dense_templates_array(one_object: Templates | SortingAnalyzer, return_in
         ext = one_object.get_extension("templates")
         if ext is not None:
             if "average" in ext.data:
-                templates_array = ext.data.get("average")
+                templates_array = ext.get_data("average")
             elif "median" in ext.data:
-                templates_array = ext.data.get("median")
+                templates_array = ext.get_data("median")
             else:
                 raise ValueError("Average or median templates have not been computed.")
         else:
             raise ValueError("SortingAnalyzer need extension 'templates' to be computed to retrieve templates")
     else:
         raise ValueError("Input should be Templates or SortingAnalyzer")
-
     return templates_array
diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py
index d24ac6b8e9..c4a38b05f3 100644
--- a/src/spikeinterface/postprocessing/template_similarity.py
+++ b/src/spikeinterface/postprocessing/template_similarity.py
@@ -394,7 +394,11 @@ def get_overlapping_mask_for_one_template(template_index, sparsity, other_sparsi
     if support == "intersection":
         mask = np.logical_and(sparsity[template_index, :], other_sparsity)  # shape (other_num_templates, num_channels)
     elif support == "union":
+        connected_mask = np.logical_and(sparsity[template_index, :], other_sparsity)
+        not_connected_mask = ~np.any(connected_mask, axis=1)
         mask = np.logical_or(sparsity[template_index, :], other_sparsity)  # shape (other_num_templates, num_channels)
+        for i in np.flatnonzero(not_connected_mask):
+            mask[i] = False
     elif support == "dense":
         mask = np.ones(other_sparsity.shape, dtype=bool)
     return mask
@@ -441,11 +445,16 @@ def compute_similarity_with_templates_array(
     else:
         other_sparsity_mask = np.ones((other_templates_array.shape[0], other_templates_array.shape[2]), dtype=bool)
 
+    #import time
+    #t_start = time.time()
+
     assert num_shifts < num_samples, "max_lag is too large"
     distances = _compute_similarity_matrix(
         templates_array, other_templates_array, num_shifts, method, sparsity_mask, other_sparsity_mask, support=support
     )
 
+    #print('Time to compute distances matrix:', time.time() - t_start)
+
     lags = np.argmin(distances, axis=0) - num_shifts
     distances = np.min(distances, axis=0)
     similarity = 1 - distances

From 30138cfd1f514753c4332a44a48cc3e0764ecdb0 Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Wed, 12 Nov 2025 22:04:47 +0100
Subject: [PATCH 04/12] Patch for numba

---
 .../postprocessing/template_similarity.py | 58 ++++++++-----------
 1 file changed, 25 insertions(+), 33 deletions(-)

diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py
index c4a38b05f3..f23569b15c 100644
--- a/src/spikeinterface/postprocessing/template_similarity.py
+++ b/src/spikeinterface/postprocessing/template_similarity.py
@@ -223,13 +223,6 @@ def _compute_similarity_matrix_numpy(
     # We can use the fact that dist[i,j] at lag t is equal to dist[j,i] at time -t
     # So the matrix can be computed only for negative lags and be transposed
 
-    if method == "l1":
-        norms = np.sum(np.abs(templates_array), axis=(1))
-    elif method == "l2":
-        norms = np.sum(templates_array**2, axis=(1))
-    elif method == "cosine":
-        norms = np.sum(templates_array**2, axis=(1))
-
     if same_array:
         # optimisation when array are the same because of symetry in shift
         shift_loop = range(-num_shifts, 1)
@@ -251,22 +244,20 @@ def _compute_similarity_matrix_numpy(
                     continue
                 src = src_template[:, local_mask[j]].reshape(1, -1)
                 tgt = (tgt_templates[gcount][:, local_mask[j]]).reshape(1, -1)
-
-                local_norms = norms[:, local_mask[j]]
 
                 if method == "l1":
-                    norm_i = local_norms[i].sum()
-                    norm_j = local_norms[j].sum()
+                    norm_i = np.sum(np.abs(src))
+                    norm_j = np.sum(np.abs(tgt))
                     distances[count, i, j] = np.sum(np.abs(src - tgt))
                     distances[count, i, j] /= norm_i + norm_j
                 elif method == "l2":
-                    norm_i = np.sqrt(local_norms[i].sum())
-                    norm_j = np.sqrt(local_norms[j].sum())
+                    norm_i = np.linalg.norm(src, ord=2)
+                    norm_j = np.linalg.norm(tgt, ord=2)
                     distances[count, i, j] = np.linalg.norm(src - tgt, ord=2)
                     distances[count, i, j] /= norm_i + norm_j
                 elif method == "cosine":
-                    norm_i = np.sqrt(local_norms[i].sum())
-                    norm_j = np.sqrt(local_norms[j].sum())
+                    norm_i = np.linalg.norm(src, ord=2)
+                    norm_j = np.linalg.norm(tgt, ord=2)
                     distances[count, i, j] = np.sum(src * tgt)
                     distances[count, i, j] /= norm_i * norm_j
                     distances[count, i, j] = 1 - distances[count, i, j]
@@ -303,17 +294,14 @@ def _compute_similarity_matrix_numba(
         # optimisation when array are the same because of symetry in shift
         shift_loop = list(range(-num_shifts, 1))
     else:
-        shift_loop = list(range(-num_shifts, num_shifts + 1)) 
+        shift_loop = list(range(-num_shifts, num_shifts + 1))
 
     if method == "l1":
         metric = 0
-        norms = np.sum(np.abs(templates_array), axis=(1))
     elif method == "l2":
         metric = 1
-        norms = np.sum(templates_array**2, axis=(1))
     elif method == "cosine":
         metric = 2
-        norms = np.sum(templates_array**2, axis=(1))
 
     for count in range(len(shift_loop)):
         shift = shift_loop[count]
@@ -331,9 +319,14 @@ def _compute_similarity_matrix_numba(
                     sparsity_mask[i, :], other_sparsity_mask
                 )  # shape (other_num_templates, num_channels)
             elif support == "union":
+                connected_mask = np.logical_and(sparsity_mask[i, :], other_sparsity_mask)
+                not_connected_mask = ~np.any(connected_mask, axis=1)
                 local_mask = np.logical_or(
-                    sparsity_mask[i, :], other_sparsity_mask
+                    sparsity_mask[i, :], other_sparsity_mask 
                 )  # shape (other_num_templates, num_channels)
+                for i in np.flatnonzero(not_connected_mask):
+                    local_mask[i] = False
+
             elif support == "dense":
                 local_mask = np.ones((other_num_templates, num_channels), dtype=np.bool_)
@@ -353,26 +346,30 @@ def _compute_similarity_matrix_numba(
                 norm_j = 0
                 distances[count, i, j] = 0
 
-                local_norms = norms[:, local_mask[j]]
-
                 for k in range(len(src)):
                     if metric == 0:
+                        norm_i += abs(src[k])
+                        norm_j += abs(tgt[k])
                         distances[count, i, j] += abs(src[k] - tgt[k])
                     elif metric == 1:
+                        norm_i += src[k] ** 2
+                        norm_j += tgt[k] ** 2
                         distances[count, i, j] += (src[k] - tgt[k]) ** 2
                     elif metric == 2:
                         distances[count, i, j] += src[k] * tgt[k]
-
+                        norm_i += src[k] ** 2
+                        norm_j += tgt[k] ** 2
+
                 if metric == 0:
-                    distances[count, i, j] /= local_norms[i].sum() + local_norms[j].sum()
+                    distances[count, i, j] /= norm_i + norm_j
                 elif metric == 1:
-                    norm_i = sqrt(local_norms[i].sum())
-                    norm_j = sqrt(local_norms[j].sum())
+                    norm_i = sqrt(norm_i)
+                    norm_j = sqrt(norm_j)
                     distances[count, i, j] = sqrt(distances[count, i, j])
                     distances[count, i, j] /= norm_i + norm_j
                 elif metric == 2:
-                    norm_i = sqrt(local_norms[i].sum())
-                    norm_j = sqrt(local_norms[j].sum())
+                    norm_i = sqrt(norm_i)
+                    norm_j = sqrt(norm_j)
                     distances[count, i, j] /= norm_i * norm_j
                     distances[count, i, j] = 1 - distances[count, i, j]
@@ -445,16 +442,11 @@ def compute_similarity_with_templates_array(
     else:
         other_sparsity_mask = np.ones((other_templates_array.shape[0], other_templates_array.shape[2]), dtype=bool)
 
-    #import time
-    #t_start = time.time()
-
     assert num_shifts < num_samples, "max_lag is too large"
     distances = _compute_similarity_matrix(
         templates_array, other_templates_array, num_shifts, method, sparsity_mask, other_sparsity_mask, support=support
     )
 
-    #print('Time to compute distances matrix:', time.time() - t_start)
-
     lags = np.argmin(distances, axis=0) - num_shifts
     distances = np.min(distances, axis=0)
     similarity = 1 - distances

From e79020522e0cd1ded444501a5ffa43647fc0cb64 Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Wed, 12 Nov 2025 22:09:01 +0100
Subject: [PATCH 05/12] syntax

---
 src/spikeinterface/postprocessing/template_similarity.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py
index f23569b15c..8a64d0aab3 100644
--- a/src/spikeinterface/postprocessing/template_similarity.py
+++ b/src/spikeinterface/postprocessing/template_similarity.py
@@ -320,7 +320,7 @@ def _compute_similarity_matrix_numba(
                 )  # shape (other_num_templates, num_channels)
             elif support == "union":
                 connected_mask = np.logical_and(sparsity_mask[i, :], other_sparsity_mask)
-                not_connected_mask = ~np.any(connected_mask, axis=1)
+                not_connected_mask = np.logical_not(np.any(connected_mask, axis=1))
                 local_mask = np.logical_or(
                     sparsity_mask[i, :], other_sparsity_mask
                 )  # shape (other_num_templates, num_channels)

From 4628f1d806e56b05a5ceef1e4722c8a9ed0e0db6 Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Wed, 12 Nov 2025 23:01:59 +0100
Subject: [PATCH 06/12] Patch for numba

---
 src/spikeinterface/postprocessing/template_similarity.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py
index 8a64d0aab3..7f541afd34 100644
--- a/src/spikeinterface/postprocessing/template_similarity.py
+++ b/src/spikeinterface/postprocessing/template_similarity.py
@@ -320,12 +320,13 @@ def _compute_similarity_matrix_numba(
                 )  # shape (other_num_templates, num_channels)
             elif support == "union":
                 connected_mask = np.logical_and(sparsity_mask[i, :], other_sparsity_mask)
-                not_connected_mask = np.logical_not(np.any(connected_mask, axis=1))
+                not_connected_mask = np.sum(connected_mask, axis=1) == 0
                 local_mask = np.logical_or(
                     sparsity_mask[i, :], other_sparsity_mask
                 )  # shape (other_num_templates, num_channels)
-                for i in np.flatnonzero(not_connected_mask):
-                    local_mask[i] = False
+                for local_i in range(len(not_connected_mask)):
+                    if not_connected_mask[local_i]:
+                        local_mask[local_i] = False
 
             elif support == "dense":
                 local_mask = np.ones((other_num_templates, num_channels), dtype=np.bool_)
@@ -392,7 +393,7 @@ def get_overlapping_mask_for_one_template(template_index, sparsity, other_sparsi
         mask = np.logical_and(sparsity[template_index, :], other_sparsity)  # shape (other_num_templates, num_channels)
     elif support == "union":
         connected_mask = np.logical_and(sparsity[template_index, :], other_sparsity)
-        not_connected_mask = ~np.any(connected_mask, axis=1)
+        not_connected_mask = np.sum(connected_mask, axis=1) == 0
         mask = np.logical_or(sparsity[template_index, :], other_sparsity)  # shape (other_num_templates, num_channels)
         for i in np.flatnonzero(not_connected_mask):
             mask[i] = False

From 8c1b54bfc93895e6a3a36cde7c329c360c5afc7c Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Wed, 12 Nov 2025 23:04:21 +0100
Subject: [PATCH 07/12] Patch for numba

---
 src/spikeinterface/postprocessing/template_similarity.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py
index 7f541afd34..41b8c852f8 100644
--- a/src/spikeinterface/postprocessing/template_similarity.py
+++ b/src/spikeinterface/postprocessing/template_similarity.py
@@ -324,9 +324,8 @@ def _compute_similarity_matrix_numba(
                 local_mask = np.logical_or(
                     sparsity_mask[i, :], other_sparsity_mask
                 )  # shape (other_num_templates, num_channels)
-                for local_i in range(len(not_connected_mask)):
-                    if not_connected_mask[local_i]:
-                        local_mask[local_i] = False
+                for local_i in np.flatnonzero(not_connected_mask):
+                    local_mask[local_i] = False
 
             elif support == "dense":
                 local_mask = np.ones((other_num_templates, num_channels), dtype=np.bool_)

From c60c7f01ea02c0153f7fca765324893aeafdf332 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 12 Nov 2025 22:07:53 +0000
Subject: [PATCH 08/12] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/spikeinterface/core/analyzer_extension_core.py | 11 +++++------
 .../postprocessing/template_similarity.py          |  4 ++--
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/src/spikeinterface/core/analyzer_extension_core.py b/src/spikeinterface/core/analyzer_extension_core.py
index a01102b261..d1f82f0ff0 100644
--- a/src/spikeinterface/core/analyzer_extension_core.py
+++ b/src/spikeinterface/core/analyzer_extension_core.py
@@ -626,16 +626,15 @@ def _get_data(self, operator="average", percentile=None, outputs="numpy"):
 
         if self.sparsity is not None:
             # For consistency, we always return dense templates even if sparsity is used
-            dense_templates_array = np.zeros((templates_array.shape[0],
-                                              templates_array.shape[1],
-                                              self.sorting_analyzer.get_num_channels()),
-                                              dtype=templates_array.dtype)
+            dense_templates_array = np.zeros(
+                (templates_array.shape[0], templates_array.shape[1], self.sorting_analyzer.get_num_channels()),
+                dtype=templates_array.dtype,
+            )
             for unit_index, unit_id in enumerate(self.sorting_analyzer.unit_ids):
                 chan_inds = self.sparsity.unit_id_to_channel_indices[unit_id]
-                dense_templates_array[unit_index][:, chan_inds] = templates_array[unit_index, :, :chan_inds.size]
+                dense_templates_array[unit_index][:, chan_inds] = templates_array[unit_index, :, : chan_inds.size]
             templates_array = dense_templates_array
 
-
         if outputs == "numpy":
             return templates_array
         elif outputs == "Templates":
diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py
index 41b8c852f8..03714329a8 100644
--- a/src/spikeinterface/postprocessing/template_similarity.py
+++ b/src/spikeinterface/postprocessing/template_similarity.py
@@ -322,11 +322,11 @@ def _compute_similarity_matrix_numba(
                 connected_mask = np.logical_and(sparsity_mask[i, :], other_sparsity_mask)
                 not_connected_mask = np.sum(connected_mask, axis=1) == 0
                 local_mask = np.logical_or(
-                    sparsity_mask[i, :], other_sparsity_mask 
+                    sparsity_mask[i, :], other_sparsity_mask
                 )  # shape (other_num_templates, num_channels)
                 for local_i in np.flatnonzero(not_connected_mask):
                     local_mask[local_i] = False
-
+
             elif support == "dense":
                 local_mask = np.ones((other_num_templates, num_channels), dtype=np.bool_)

From d5e7f43f490bb33f79535eceb343850c79738a1d Mon Sep 17 00:00:00 2001
From: Sebastien
Date: Thu, 13 Nov 2025 11:59:07 +0100
Subject: [PATCH 09/12] Cleaning

---
 src/spikeinterface/core/analyzer_extension_core.py | 11 -----------
 src/spikeinterface/core/template_tools.py          |  4 ++--
 2 files changed, 2 insertions(+), 13 deletions(-)

diff --git a/src/spikeinterface/core/analyzer_extension_core.py b/src/spikeinterface/core/analyzer_extension_core.py
index d1f82f0ff0..6efda66644 100644
--- a/src/spikeinterface/core/analyzer_extension_core.py
+++ b/src/spikeinterface/core/analyzer_extension_core.py
@@ -624,17 +624,6 @@ def _get_data(self, operator="average", percentile=None, outputs="numpy"):
 
         templates_array = self.data[key]
 
-        if self.sparsity is not None:
-            # For consistency, we always return dense templates even if sparsity is used
-            dense_templates_array = np.zeros(
-                (templates_array.shape[0], templates_array.shape[1], self.sorting_analyzer.get_num_channels()),
-                dtype=templates_array.dtype,
-            )
-            for unit_index, unit_id in enumerate(self.sorting_analyzer.unit_ids):
-                chan_inds = self.sparsity.unit_id_to_channel_indices[unit_id]
-                dense_templates_array[unit_index][:, chan_inds] = templates_array[unit_index, :, : chan_inds.size]
-            templates_array = dense_templates_array
-
         if outputs == "numpy":
             return templates_array
         elif outputs == "Templates":
diff --git a/src/spikeinterface/core/template_tools.py b/src/spikeinterface/core/template_tools.py
index 81ab985fd7..ecc878e1f4 100644
--- a/src/spikeinterface/core/template_tools.py
+++ b/src/spikeinterface/core/template_tools.py
@@ -35,9 +35,9 @@ def get_dense_templates_array(one_object: Templates | SortingAnalyzer, return_in
         ext = one_object.get_extension("templates")
         if ext is not None:
             if "average" in ext.data:
-                templates_array = ext.get_data("average")
+                templates_array = ext.data.get("average")
             elif "median" in ext.data:
-                templates_array = ext.get_data("median")
+                templates_array = ext.data.get("median")
             else:
                 raise ValueError("Average or median templates have not been computed.")

From 1e1d3aa9cb2f84a6dced141e4172bc672fad76e5 Mon Sep 17 00:00:00 2001
From: Sebastien
Date: Thu, 13 Nov 2025 11:59:32 +0100
Subject: [PATCH 10/12] Cleaning

---
 src/spikeinterface/core/analyzer_extension_core.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/spikeinterface/core/analyzer_extension_core.py b/src/spikeinterface/core/analyzer_extension_core.py
index 6efda66644..fea3f3618e 100644
--- a/src/spikeinterface/core/analyzer_extension_core.py
+++ b/src/spikeinterface/core/analyzer_extension_core.py
@@ -445,7 +445,6 @@ def _run(self, verbose=False, **job_kwargs):
             self.nafter,
             return_in_uV=return_in_uV,
             return_std=return_std,
-            sparsity_mask=None if self.sparsity is None else self.sparsity.mask,
             verbose=verbose,
             **job_kwargs,
         )

From 3031d9e37ea9b534a042609801cf16a54e8c26fc Mon Sep 17 00:00:00 2001
From: Pierre Yger
Date: Thu, 13 Nov 2025 15:38:23 +0100
Subject: [PATCH 11/12] Speedup merging

---
 .../postprocessing/template_similarity.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/spikeinterface/postprocessing/template_similarity.py b/src/spikeinterface/postprocessing/template_similarity.py
index 03714329a8..b6f054552d 100644
--- a/src/spikeinterface/postprocessing/template_similarity.py
+++ b/src/spikeinterface/postprocessing/template_similarity.py
@@ -110,16 +110,16 @@ def _merge_extension_data(
         n = all_new_unit_ids.size
         similarity = np.zeros((n, n), dtype=old_similarity.dtype)
 
+        local_mask = ~np.isin(all_new_unit_ids, new_unit_ids)
+        sub_units_ids = all_new_unit_ids[local_mask]
+        sub_units_inds = np.flatnonzero(local_mask)
+        old_units_inds = self.sorting_analyzer.sorting.ids_to_indices(sub_units_ids)
+
         # copy old similarity
-        for unit_ind1, unit_id1 in enumerate(all_new_unit_ids):
-            if unit_id1 not in new_unit_ids:
-                old_ind1 = self.sorting_analyzer.sorting.id_to_index(unit_id1)
-                for unit_ind2, unit_id2 in enumerate(all_new_unit_ids):
-                    if unit_id2 not in new_unit_ids:
-                        old_ind2 = self.sorting_analyzer.sorting.id_to_index(unit_id2)
-                        s = self.data["similarity"][old_ind1, old_ind2]
-                        similarity[unit_ind1, unit_ind2] = s
-                        similarity[unit_ind1, unit_ind2] = s
+        for old_ind1, unit_ind1 in zip(old_units_inds, sub_units_inds):
+            s = self.data["similarity"][old_ind1, old_units_inds]
+            similarity[unit_ind1, sub_units_inds] = s
+            similarity[sub_units_inds, unit_ind1] = s
 
         # insert new similarity both way
         for unit_ind, unit_id in enumerate(all_new_unit_ids):

From b53a9f013e80c676433954a0f60d63f6bbf6205d Mon Sep 17 00:00:00 2001
From: Sebastien
Date: Fri, 14 Nov 2025 09:06:31 +0100
Subject: [PATCH 12/12] Message

---
 src/spikeinterface/benchmark/benchmark_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/spikeinterface/benchmark/benchmark_base.py b/src/spikeinterface/benchmark/benchmark_base.py
index dab681a7be..63f4da09b4 100644
--- a/src/spikeinterface/benchmark/benchmark_base.py
+++ b/src/spikeinterface/benchmark/benchmark_base.py
@@ -431,7 +431,7 @@ def compute_results(self, case_keys=None, verbose=False, **result_params):
             if verbose:
                 print("### Compute result", key, "###")
             benchmark = self.benchmarks[key]
-            assert benchmark is not None
+            assert benchmark is not None, f"Benchmark for key {key} has not been run yet!"
            benchmark.compute_result(**result_params)
             benchmark.save_result(self.folder / "results" / self.key_to_str(key))
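
Two short notes on the series follow, each with a self-contained numpy sketch. These are illustrations only, not part of the patches; the helper name and the toy arrays are made up.

The "union" support fix (PATCH 03, reworked for the numba path in PATCHES 04-07) blanks the mask row of any template pair that shares no channel, so the pair is skipped rather than compared over the union of its channels. A minimal sketch of that behaviour, mirroring get_overlapping_mask_for_one_template:

import numpy as np


def union_overlap_mask(sparsity, other_sparsity, template_index):
    # channels shared between template_index and each other template
    connected = np.logical_and(sparsity[template_index, :], other_sparsity)
    # templates sharing no channel at all with template_index
    not_connected = np.sum(connected, axis=1) == 0
    # union of the channels, shape (other_num_templates, num_channels)
    mask = np.logical_or(sparsity[template_index, :], other_sparsity)
    # a pair with no common channel is not comparable: blank its row
    # so the caller skips it instead of scoring it on the union
    mask[not_connected] = False
    return mask


sparsity = np.array([[True, True, False, False]])
other_sparsity = np.array(
    [
        [True, False, False, False],  # overlaps template 0 on channel 0
        [False, False, True, True],  # shares no channel with template 0
    ]
)
print(union_overlap_mask(sparsity, other_sparsity, 0))
# row 0 -> union of channels {0, 1}; row 1 -> all False, so the pair is skipped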
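
The loop replaced in PATCH 11 copied the old similarity matrix entry by entry, with a Python-level id_to_index call per pair; the new code does one row and one column assignment per kept unit. A toy equivalence check (unit ids and the ids_to_indices lookup are stubbed with plain arrays, and the matrix is made symmetric, as a similarity matrix is):

import numpy as np

rng = np.random.default_rng(0)
old_similarity = rng.random((5, 5))
old_similarity = (old_similarity + old_similarity.T) / 2  # symmetric, like a real similarity

all_new_unit_ids = np.array([0, 1, 2, 99])  # unit 99 is the merged unit
new_unit_ids = np.array([99])  # ids created by the merge
old_index_of = {0: 0, 1: 1, 2: 4}  # stand-in for sorting.ids_to_indices

local_mask = ~np.isin(all_new_unit_ids, new_unit_ids)
sub_units_ids = all_new_unit_ids[local_mask]
sub_units_inds = np.flatnonzero(local_mask)
old_units_inds = np.array([old_index_of[u] for u in sub_units_ids])

# one row and one column per kept unit, as in PATCH 11
similarity = np.zeros((4, 4))
for old_ind1, unit_ind1 in zip(old_units_inds, sub_units_inds):
    s = old_similarity[old_ind1, old_units_inds]
    similarity[unit_ind1, sub_units_inds] = s
    similarity[sub_units_inds, unit_ind1] = s

# the same block copy as a single fancy-indexed assignment
expected = np.zeros((4, 4))
expected[np.ix_(sub_units_inds, sub_units_inds)] = old_similarity[np.ix_(old_units_inds, old_units_inds)]
assert np.array_equal(similarity, expected)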