Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions src/spikeinterface/core/baserecording.py
Original file line number Diff line number Diff line change
Expand Up @@ -381,6 +381,11 @@ def has_scaled_traces(self) -> bool:
bool
True if the recording has scaled traces, False otherwise
"""
warnings.warn(
"`has_scaled_traces` is deprecated and will be removed in 0.103.0. Use has_scaleable_traces() instead",
category=DeprecationWarning,
stacklevel=2,
)
return self.has_scaled()

def get_time_info(self, segment_index=None) -> dict:
Expand Down Expand Up @@ -640,6 +645,7 @@ def _channel_slice(self, channel_ids, renamed_channel_ids=None):
warnings.warn(
"This method will be removed in version 0.103, use `select_channels` or `rename_channels` instead.",
DeprecationWarning,
stacklevel=2,
)
sub_recording = ChannelSliceRecording(self, channel_ids, renamed_channel_ids=renamed_channel_ids)
return sub_recording
Expand Down
10 changes: 9 additions & 1 deletion src/spikeinterface/core/baserecordingsnippets.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,12 +48,20 @@ def get_num_channels(self):
def get_dtype(self):
return self._dtype

def has_scaled(self):
def has_scaleable_traces(self):
if self.get_property("gain_to_uV") is None or self.get_property("offset_to_uV") is None:
return False
else:
return True

def has_scaled(self):
warn(
"`has_scaled` has been deprecated and will be removed in 0.103.0. Please use `has_scaleable_traces()`",
category=DeprecationWarning,
stacklevel=2,
)
return self.has_scaleable_traces()

def has_probe(self):
return "contact_vector" in self.get_property_keys()

Expand Down
9 changes: 7 additions & 2 deletions src/spikeinterface/core/basesnippets.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,12 @@ def get_num_segments(self):
return len(self._snippets_segments)

def has_scaled_snippets(self):
return self.has_scaled()
warn(
"`has_scaled_snippets` is deprecated and will be removed in version 0.103.0. Please use `has_scaleable_traces()` instead",
category=DeprecationWarning,
stacklevel=2,
)
return self.has_scaleable_traces()

def get_frames(self, indices=None, segment_index: Union[int, None] = None):
segment_index = self._check_segment_index(segment_index)
Expand All @@ -101,7 +106,7 @@ def get_snippets(
wfs = spts.get_snippets(indices, channel_indices=channel_indices)

if return_scaled:
if not self.has_scaled():
if not self.has_scaleable_traces():
raise ValueError(
"These snippets do not support return_scaled=True (need gain_to_uV and offset_" "to_uV properties)"
)
Expand Down
2 changes: 1 addition & 1 deletion src/spikeinterface/core/sortinganalyzer.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ def create_sorting_analyzer(
else:
sparsity = None

if return_scaled and not recording.has_scaled_traces() and recording.get_dtype().kind == "i":
if return_scaled and not recording.has_scaleable_traces() and recording.get_dtype().kind == "i":
print("create_sorting_analyzer: recording does not have scaling to uV, forcing return_scaled=False")
return_scaled = False

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def test_recording_s3_nwb_ros3(tmp_path):
assert full_traces.shape == (num_frames, num_chans)
assert full_traces.dtype == dtype

if rec.has_scaled():
if rec.has_scaleable_traces():
trace_scaled = rec.get_traces(segment_index=segment_index, return_scaled=True, end_frame=2)
assert trace_scaled.dtype == "float32"

Expand Down Expand Up @@ -76,7 +76,7 @@ def test_recording_s3_nwb_fsspec(tmp_path, cache):
assert full_traces.shape == (num_frames, num_chans)
assert full_traces.dtype == dtype

if rec.has_scaled():
if rec.has_scaleable_traces():
trace_scaled = rec.get_traces(segment_index=segment_index, return_scaled=True, end_frame=2)
assert trace_scaled.dtype == "float32"

Expand Down
2 changes: 1 addition & 1 deletion src/spikeinterface/postprocessing/amplitude_scalings.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ def __init__(
):
PipelineNode.__init__(self, recording, parents=parents, return_output=return_output)
self.return_scaled = return_scaled
if return_scaled and recording.has_scaled():
if return_scaled and recording.has_scaleable_traces():
self._dtype = np.float32
self._gains = recording.get_channel_gains()
self._offsets = recording.get_channel_offsets()
Expand Down
2 changes: 1 addition & 1 deletion src/spikeinterface/postprocessing/spike_amplitudes.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ def __init__(
):
PipelineNode.__init__(self, recording, parents=parents, return_output=return_output)
self.return_scaled = return_scaled
if return_scaled and recording.has_scaled():
if return_scaled and recording.has_scaleable_traces():
self._dtype = np.float32
self._gains = recording.get_channel_gains()
self._offsets = recording.get_channel_offsets()
Expand Down
2 changes: 1 addition & 1 deletion src/spikeinterface/preprocessing/correct_lsb.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def correct_lsb(recording, num_chunks_per_segment=20, chunk_size=10000, seed=Non
# apply LSB division and instantiate parent
recording_lsb = scale(recording_lsb, gain=1.0 / lsb, dtype=dtype)
# if recording has scaled traces, correct gains
if recording.has_scaled():
if recording.has_scaleable_traces():
recording_lsb.set_channel_gains(recording_lsb.get_channel_gains() * lsb)
return recording_lsb

Expand Down
2 changes: 1 addition & 1 deletion src/spikeinterface/preprocessing/detect_bad_channels.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ def detect_bad_channels(

elif method == "coherence+psd":
# some checks
assert recording.has_scaled(), (
assert recording.has_scaleable_traces(), (
"The 'coherence+psd' method uses thresholds assuming the traces are in uV, "
"but the recording does not have scaled traces. If the recording is already scaled, "
"you need to set gains and offsets: "
Expand Down
2 changes: 1 addition & 1 deletion src/spikeinterface/sorters/external/waveclus.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@ def _setup_recording(cls, recording, sorter_output_folder, params, verbose):
time_axis=0,
single_axis=True,
chunk_memory=params["chunk_memory"],
return_scaled=rec_sliced.has_scaled(),
return_scaled=rec_sliced.has_scaleable_traces(),
)

if verbose:
Expand Down
2 changes: 1 addition & 1 deletion src/spikeinterface/widgets/traces.py
Original file line number Diff line number Diff line change
Expand Up @@ -671,7 +671,7 @@ def _get_trace_list(recordings, channel_ids, time_range, segment_index, return_s

if return_scaled:
assert all(
rec.has_scaled() for rec in recordings.values()
rec.has_scaleable_traces() for rec in recordings.values()
), "Some recording layers do not have scaled traces. Use `return_scaled=False`"
if times is not None:
frame_range = np.searchsorted(times, time_range)
Expand Down