From 3b8b07787d211b060b3df5a503ebed264ccddb45 Mon Sep 17 00:00:00 2001 From: mbsantiago Date: Fri, 10 May 2024 11:01:17 +0100 Subject: [PATCH] migrated formatting to ruff --- .github/workflows/test.yml | 4 -- docs/user_guide/2_loading_audio.py | 2 +- src/soundevent/arrays/dimensions.py | 4 +- src/soundevent/arrays/operations.py | 18 ++++-- src/soundevent/audio/chunks.py | 3 +- src/soundevent/audio/filter.py | 4 +- src/soundevent/audio/media_info.py | 5 +- src/soundevent/audio/spectrum.py | 8 ++- src/soundevent/data/annotation_sets.py | 4 +- src/soundevent/data/annotation_tasks.py | 8 ++- src/soundevent/data/clip_annotations.py | 8 ++- src/soundevent/data/clip_evaluations.py | 8 ++- src/soundevent/data/evaluations.py | 4 +- src/soundevent/data/geometries.py | 36 +++++++---- src/soundevent/data/notes.py | 4 +- src/soundevent/data/prediction_sets.py | 4 +- src/soundevent/data/recording_sets.py | 4 +- src/soundevent/data/recordings.py | 5 +- src/soundevent/data/sequence_annotations.py | 4 +- .../data/sound_event_annotations.py | 4 +- src/soundevent/evaluation/affinity.py | 20 +++++-- src/soundevent/evaluation/encoding.py | 59 +++++++++++-------- src/soundevent/evaluation/match.py | 1 - src/soundevent/evaluation/metrics.py | 13 ++-- src/soundevent/evaluation/tasks/__init__.py | 4 +- .../evaluation/tasks/clip_classification.py | 7 ++- .../tasks/clip_multilabel_classification.py | 7 ++- src/soundevent/evaluation/tasks/common.py | 4 +- .../tasks/sound_event_classification.py | 19 ++++-- .../evaluation/tasks/sound_event_detection.py | 7 ++- src/soundevent/geometry/__init__.py | 5 +- src/soundevent/geometry/features.py | 16 +++-- src/soundevent/geometry/html.py | 6 +- src/soundevent/io/aoef/__init__.py | 13 +++- src/soundevent/io/aoef/adapters.py | 8 ++- src/soundevent/io/aoef/annotation_project.py | 27 ++++++--- src/soundevent/io/aoef/annotation_set.py | 21 +++++-- src/soundevent/io/aoef/clip_annotations.py | 19 ++++-- src/soundevent/io/aoef/clip_evaluation.py | 13 +++- src/soundevent/io/aoef/clip_predictions.py | 19 ++++-- src/soundevent/io/aoef/dataset.py | 4 +- src/soundevent/io/aoef/evaluation.py | 47 +++++++++++---- src/soundevent/io/aoef/evaluation_set.py | 11 +++- src/soundevent/io/aoef/match.py | 8 ++- src/soundevent/io/aoef/prediction_set.py | 17 ++++-- src/soundevent/io/aoef/recording.py | 18 ++++-- src/soundevent/io/aoef/recording_set.py | 3 +- src/soundevent/io/aoef/sequence.py | 4 +- src/soundevent/io/aoef/sequence_prediction.py | 3 +- src/soundevent/io/aoef/sound_event.py | 4 +- .../io/aoef/sound_event_annotation.py | 8 ++- .../io/aoef/sound_event_prediction.py | 11 +++- src/soundevent/io/crowsetta/__init__.py | 5 +- src/soundevent/io/crowsetta/annotation.py | 18 +++--- src/soundevent/io/crowsetta/bbox.py | 9 ++- src/soundevent/io/crowsetta/labels.py | 4 +- src/soundevent/io/crowsetta/segment.py | 5 +- src/soundevent/io/crowsetta/sequence.py | 1 - src/soundevent/io/formats.py | 4 +- src/soundevent/io/saver.py | 1 - src/soundevent/plot/annotation.py | 9 +-- src/soundevent/plot/geometries.py | 3 +- src/soundevent/plot/tags.py | 4 +- tests/conftest.py | 1 - tests/test_array/test_dimensions.py | 4 +- tests/test_array/test_operations.py | 2 +- tests/test_audio/test_filter.py | 1 - tests/test_audio/test_io.py | 7 ++- tests/test_audio/test_raw.py | 1 - tests/test_audio/test_resample.py | 1 - tests/test_audio/test_scaling.py | 1 - tests/test_data/test_datasets.py | 9 ++- tests/test_data/test_evaluated_samples.py | 1 - tests/test_data/test_geometry.py | 6 +- .../test_clip_classification.py | 
20 +++++-- .../test_clip_multilabel_classification.py | 1 - tests/test_evaluation/test_encode.py | 6 +- tests/test_evaluation/test_matching.py | 4 +- tests/test_evaluation/test_metrics.py | 1 - .../test_sound_event_detection.py | 4 +- tests/test_geometry/conftest.py | 1 - tests/test_geometry/test_conversion.py | 1 - tests/test_geometry/test_html.py | 1 - tests/test_geometry/test_operations.py | 1 - tests/test_io/conftest.py | 1 - tests/test_io/test_annotation_projects.py | 57 ++++++++++++++---- tests/test_io/test_aoef/conftest.py | 1 - tests/test_io/test_aoef/test_api.py | 17 +++--- .../test_io/test_crowsetta/test_annotation.py | 9 ++- tests/test_io/test_crowsetta/test_bbox.py | 3 +- tests/test_io/test_crowsetta/test_import.py | 13 ++-- tests/test_io/test_crowsetta/test_labels.py | 4 +- tests/test_io/test_crowsetta/test_segments.py | 11 ++-- tests/test_io/test_crowsetta/test_sequence.py | 5 +- tests/test_io/test_model_runs.py | 14 ++++- 95 files changed, 582 insertions(+), 257 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f04eb83..870215d 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -23,10 +23,6 @@ jobs: python -m pip install --upgrade pip python -m pip install pytest hypothesis ruff mypy black html5lib python -m pip install ".[all]" - - name: Check format is correct - run: | - black --check src - black --check tests - name: Make sure types are consistent run: mypy --ignore-missing-imports src - name: Lint with ruff diff --git a/docs/user_guide/2_loading_audio.py b/docs/user_guide/2_loading_audio.py index 3aedb4b..281cbb4 100644 --- a/docs/user_guide/2_loading_audio.py +++ b/docs/user_guide/2_loading_audio.py @@ -6,7 +6,7 @@ [`xarray.DataArray`][xarray.DataArray] objects to hold loaded audio data. [`xarray.DataArray`][xarray.DataArray] objects are an extension of [`numpy`][numpy.ndarray] arrays, so there's no need to learn new concepts -if you are already familiar with [`numpy`][numpy.ndarray] arrays. +if you are already familiar with [`numpy`][numpy.ndarray] arrays. !!! note "Why use `xarray.DataArray` objects?" diff --git a/src/soundevent/arrays/dimensions.py b/src/soundevent/arrays/dimensions.py index b877b2e..203067e 100644 --- a/src/soundevent/arrays/dimensions.py +++ b/src/soundevent/arrays/dimensions.py @@ -493,7 +493,9 @@ def get_dim_step( return attrs[DimAttrs.step.value] if not estimate_step: - raise ValueError(f"Step size not found in the '{dim}' dimension attributes.") + raise ValueError( + f"Step size not found in the '{dim}' dimension attributes." 
+ ) return estimate_dim_step( coord.data, diff --git a/src/soundevent/arrays/operations.py b/src/soundevent/arrays/operations.py index ba5a226..f01ac97 100644 --- a/src/soundevent/arrays/operations.py +++ b/src/soundevent/arrays/operations.py @@ -7,7 +7,11 @@ from numpy.typing import DTypeLike from xarray.core.types import InterpOptions -from soundevent.arrays.dimensions import create_range_dim, get_dim_range, get_dim_step +from soundevent.arrays.dimensions import ( + create_range_dim, + get_dim_range, + get_dim_step, +) __all__ = [ "center", @@ -88,7 +92,9 @@ def crop_dim( stop = current_stop if start > stop: - raise ValueError(f"Start value {start} must be less than stop value {stop}") + raise ValueError( + f"Start value {start} must be less than stop value {stop}" + ) if start < current_start or stop > current_stop: raise ValueError( @@ -174,7 +180,9 @@ def extend_dim( stop = current_stop if start > stop: - raise ValueError(f"Start value {start} must be less than stop value {stop}") + raise ValueError( + f"Start value {start} must be less than stop value {stop}" + ) step = get_dim_step(arr, dim) @@ -304,7 +312,9 @@ def set_value_at_pos( start, stop = get_dim_range(array, dim) if coord < start or coord > stop: - raise KeyError(f"Position {coord} is outside the range of dimension {dim}.") + raise KeyError( + f"Position {coord} is outside the range of dimension {dim}." + ) index = array.indexes[dim].get_slice_bound(coord, "right") indexer[dims[dim]] = index - 1 diff --git a/src/soundevent/audio/chunks.py b/src/soundevent/audio/chunks.py index 975e8bb..3f2b2d5 100644 --- a/src/soundevent/audio/chunks.py +++ b/src/soundevent/audio/chunks.py @@ -111,7 +111,8 @@ def _read_chunk(riff: BinaryIO) -> Optional[Chunk]: if chunk_id in CHUNKS_WITH_SUBCHUNKS: chunk.subchunks = { - subchunk.chunk_id: subchunk for subchunk in _get_subchunks(riff, size - 4) + subchunk.chunk_id: subchunk + for subchunk in _get_subchunks(riff, size - 4) } else: riff.seek(size, os.SEEK_CUR) diff --git a/src/soundevent/audio/filter.py b/src/soundevent/audio/filter.py index 63ff040..6df6111 100644 --- a/src/soundevent/audio/filter.py +++ b/src/soundevent/audio/filter.py @@ -18,7 +18,9 @@ def _get_filter( order: int = 5, ) -> np.ndarray: if low_freq is None and high_freq is None: - raise ValueError("At least one of low_freq and high_freq must be specified.") + raise ValueError( + "At least one of low_freq and high_freq must be specified." + ) if low_freq is None: # Low pass filter diff --git a/src/soundevent/audio/media_info.py b/src/soundevent/audio/media_info.py index 6e44bd8..0fba73a 100644 --- a/src/soundevent/audio/media_info.py +++ b/src/soundevent/audio/media_info.py @@ -156,7 +156,9 @@ def get_media_info(path: PathLike) -> MediaInfo: # chunk is the size of the data subchunk divided by the number # of channels and the bit depth. data_chunk = chunk.subchunks["data"] - samples = 8 * data_chunk.size // (fmt_info.channels * fmt_info.bit_depth) + samples = ( + 8 * data_chunk.size // (fmt_info.channels * fmt_info.bit_depth) + ) duration = samples / fmt_info.samplerate return MediaInfo( @@ -224,7 +226,6 @@ def generate_wav_header( The structure of the WAV header is described in (WAV PCM soundfile format)[http://soundfile.sapp.org/doc/WaveFormat/]. 
""" - data_size = samples * channels * bit_depth // 8 byte_rate = samplerate * channels * bit_depth // 8 block_align = channels * bit_depth // 8 diff --git a/src/soundevent/audio/spectrum.py b/src/soundevent/audio/spectrum.py index 3ab9f40..57a4738 100644 --- a/src/soundevent/audio/spectrum.py +++ b/src/soundevent/audio/spectrum.py @@ -120,7 +120,9 @@ def pcen_core( raise ValueError(f"eps={eps} must be strictly positive") if time_constant <= 0: - raise ValueError(f"time_constant={time_constant} must be strictly positive") + raise ValueError( + f"time_constant={time_constant} must be strictly positive" + ) if b is None: t_frames = time_constant * sr / float(hop_length) @@ -144,7 +146,9 @@ def pcen_core( if max_size == 1: ref = S elif S.ndim == 1: - raise ValueError("Max-filtering cannot be applied to 1-dimensional input") + raise ValueError( + "Max-filtering cannot be applied to 1-dimensional input" + ) else: if max_axis is None: if S.ndim != 2: diff --git a/src/soundevent/data/annotation_sets.py b/src/soundevent/data/annotation_sets.py index 7273282..6ea30e0 100644 --- a/src/soundevent/data/annotation_sets.py +++ b/src/soundevent/data/annotation_sets.py @@ -28,4 +28,6 @@ class AnnotationSet(BaseModel): default_factory=list, repr=False, ) - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) diff --git a/src/soundevent/data/annotation_tasks.py b/src/soundevent/data/annotation_tasks.py index 31ad438..3c9495c 100644 --- a/src/soundevent/data/annotation_tasks.py +++ b/src/soundevent/data/annotation_tasks.py @@ -60,11 +60,15 @@ class StatusBadge(BaseModel): state: AnnotationState owner: Optional[User] = None - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) class AnnotationTask(BaseModel): uuid: UUID = Field(default_factory=uuid4, repr=False) clip: Clip status_badges: List[StatusBadge] = Field(default_factory=list) - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) diff --git a/src/soundevent/data/clip_annotations.py b/src/soundevent/data/clip_annotations.py index acadefe..795c4b5 100644 --- a/src/soundevent/data/clip_annotations.py +++ b/src/soundevent/data/clip_annotations.py @@ -43,7 +43,9 @@ class ClipAnnotation(BaseModel): annotations A list of Annotation instances representing detailed annotations of sound events in the clip. - notes + + Notes + ----- A list of Note instances representing additional contextual information or remarks associated with the clip. 
""" @@ -54,4 +56,6 @@ class ClipAnnotation(BaseModel): sequences: List[SequenceAnnotation] = Field(default_factory=list) tags: List[Tag] = Field(default_factory=list) notes: List[Note] = Field(default_factory=list) - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) diff --git a/src/soundevent/data/clip_evaluations.py b/src/soundevent/data/clip_evaluations.py index 380a9c8..ad812b0 100644 --- a/src/soundevent/data/clip_evaluations.py +++ b/src/soundevent/data/clip_evaluations.py @@ -95,13 +95,17 @@ def _check_matches(self): } match_targets = [ - match.target.uuid for match in self.matches if match.target is not None + match.target.uuid + for match in self.matches + if match.target is not None ] match_targets_set = set(match_targets) match_sources = [ - match.source.uuid for match in self.matches if match.source is not None + match.source.uuid + for match in self.matches + if match.source is not None ] match_sources_set = set(match_sources) diff --git a/src/soundevent/data/evaluations.py b/src/soundevent/data/evaluations.py index 4ef34ed..d55db26 100644 --- a/src/soundevent/data/evaluations.py +++ b/src/soundevent/data/evaluations.py @@ -25,7 +25,9 @@ class Evaluation(BaseModel): """Evaluation Class.""" uuid: UUID = Field(default_factory=uuid4, repr=False) - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) evaluation_task: str clip_evaluations: Sequence[ClipEvaluation] = Field(default_factory=list) metrics: Sequence[Feature] = Field(default_factory=list) diff --git a/src/soundevent/data/geometries.py b/src/soundevent/data/geometries.py index be5af2c..a83a718 100644 --- a/src/soundevent/data/geometries.py +++ b/src/soundevent/data/geometries.py @@ -252,7 +252,9 @@ def _validate_time_interval(cls, v: List[Time]) -> List[Time]: after the end time). """ if len(v) != 2: - raise ValueError("The time interval must have exactly two time stamps.") + raise ValueError( + "The time interval must have exactly two time stamps." + ) if v[0] > v[1]: raise ValueError("The start time must be before the end time.") @@ -323,7 +325,9 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]: raise ValueError("The time must be positive.") if frequency < 0 or frequency > MAX_FREQUENCY: - raise ValueError(f"The frequency must be between 0 and {MAX_FREQUENCY}.") + raise ValueError( + f"The frequency must be between 0 and {MAX_FREQUENCY}." + ) return v @@ -469,7 +473,8 @@ def _validate_coordinates( if frequency < 0 or frequency > MAX_FREQUENCY: raise ValueError( - f"The frequency must be between 0 and " f"{MAX_FREQUENCY}." + f"The frequency must be between 0 and " + f"{MAX_FREQUENCY}." ) return v @@ -527,7 +532,9 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]: negative or the frequency is outside the valid range). """ if len(v) != 4: - raise ValueError("The bounding box must have exactly four coordinates.") + raise ValueError( + "The bounding box must have exactly four coordinates." + ) start_time, low_freq, end_time, high_freq = v @@ -551,7 +558,9 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]: raise ValueError("The start time must be before the end time.") if low_freq > high_freq: - raise ValueError("The start frequency must be before the end frequency.") + raise ValueError( + "The start frequency must be before the end frequency." 
+ ) return v @@ -762,7 +771,9 @@ def _validate_coordinates( negative or the frequency is outside the valid range). """ if len(v) < 1: - raise ValueError("The multipolygon must have at least one polygon.") + raise ValueError( + "The multipolygon must have at least one polygon." + ) for polygon in v: if len(polygon) < 1: @@ -770,7 +781,9 @@ def _validate_coordinates( for ring in polygon: if len(ring) < 3: - raise ValueError("Each ring must have at least three points.") + raise ValueError( + "Each ring must have at least three points." + ) for time, frequency in ring: if time < 0: @@ -778,7 +791,8 @@ def _validate_coordinates( if frequency < 0 or frequency > MAX_FREQUENCY: raise ValueError( - f"The frequency must be between 0 and " f"{MAX_FREQUENCY}." + f"The frequency must be between 0 and " + f"{MAX_FREQUENCY}." ) return v @@ -894,7 +908,7 @@ def geometry_validate( if not hasattr(obj, "type"): raise ValueError(f"Object {obj} does not have a type attribute.") - geom_type = getattr(obj, "type") + geom_type = obj.type if geom_type not in GEOMETRY_MAPPING: raise ValueError(f"Object {obj} does not have a geometry valid type.") @@ -907,4 +921,6 @@ def geometry_validate( from_attributes=mode == "attributes", ) except ValidationError as error: - raise ValueError(f"Object {obj} is not a valid {geom_type}.") from error + raise ValueError( + f"Object {obj} is not a valid {geom_type}." + ) from error diff --git a/src/soundevent/data/notes.py b/src/soundevent/data/notes.py index 3ee2de6..de0de81 100644 --- a/src/soundevent/data/notes.py +++ b/src/soundevent/data/notes.py @@ -95,7 +95,9 @@ class Note(BaseModel): message: str created_by: Optional[User] = None is_issue: bool = False - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) def __hash__(self): """Hash the Note object.""" diff --git a/src/soundevent/data/prediction_sets.py b/src/soundevent/data/prediction_sets.py index 1cf3bd6..edffd2a 100644 --- a/src/soundevent/data/prediction_sets.py +++ b/src/soundevent/data/prediction_sets.py @@ -73,4 +73,6 @@ class PredictionSet(BaseModel): uuid: UUID = Field(default_factory=uuid4) clip_predictions: List[ClipPrediction] = Field(default_factory=list) - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) diff --git a/src/soundevent/data/recording_sets.py b/src/soundevent/data/recording_sets.py index cb1ff5f..dde95ef 100644 --- a/src/soundevent/data/recording_sets.py +++ b/src/soundevent/data/recording_sets.py @@ -12,4 +12,6 @@ class RecordingSet(BaseModel): uuid: UUID = Field(default_factory=uuid4) recordings: List[Recording] = Field(default_factory=list, repr=False) - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) diff --git a/src/soundevent/data/recordings.py b/src/soundevent/data/recordings.py index b2cbb7c..088d23a 100644 --- a/src/soundevent/data/recordings.py +++ b/src/soundevent/data/recordings.py @@ -193,7 +193,10 @@ def from_file( Recording The recording object. 
""" - from soundevent.audio.media_info import compute_md5_checksum, get_media_info + from soundevent.audio.media_info import ( + compute_md5_checksum, + get_media_info, + ) media_info = get_media_info(path) diff --git a/src/soundevent/data/sequence_annotations.py b/src/soundevent/data/sequence_annotations.py index 47e2dfa..9aef0d9 100644 --- a/src/soundevent/data/sequence_annotations.py +++ b/src/soundevent/data/sequence_annotations.py @@ -25,7 +25,9 @@ class SequenceAnnotation(BaseModel): A unique identifier for the annotation. sequence The sequence being annotated. - notes + + Notes + ----- A list of notes associated with the sequence. tags The tags attached to the sequence providing semantic information. diff --git a/src/soundevent/data/sound_event_annotations.py b/src/soundevent/data/sound_event_annotations.py index a2e4316..647ca2b 100644 --- a/src/soundevent/data/sound_event_annotations.py +++ b/src/soundevent/data/sound_event_annotations.py @@ -83,7 +83,9 @@ class SoundEventAnnotation(BaseModel): being annotated. Sound events define distinct audio occurrences, such as bird calls or animal vocalizations, and are essential for categorizing the content of the audio data. - notes + + Notes + ----- A list of `Note` instances representing additional contextual information or remarks associated with the annotation. Notes can provide insights into specific characteristics of the sound event, aiding in the comprehensive understanding diff --git a/src/soundevent/evaluation/affinity.py b/src/soundevent/evaluation/affinity.py index 1de83f1..a2fe764 100644 --- a/src/soundevent/evaluation/affinity.py +++ b/src/soundevent/evaluation/affinity.py @@ -1,7 +1,11 @@ """Measures of affinity between sound events geometries.""" from soundevent import data -from soundevent.geometry import buffer_geometry, compute_bounds, geometry_to_shapely +from soundevent.geometry import ( + buffer_geometry, + compute_bounds, + geometry_to_shapely, +) __all__ = [ "compute_affinity", @@ -80,11 +84,13 @@ def compute_affinity( >>> affinity 0.75 """ - geometry1 = _prepare_geometry(geometry1, time_buffer, freq_buffer) geometry2 = _prepare_geometry(geometry2, time_buffer, freq_buffer) - if geometry1.type in TIME_GEOMETRY_TYPES or geometry2.type in TIME_GEOMETRY_TYPES: + if ( + geometry1.type in TIME_GEOMETRY_TYPES + or geometry2.type in TIME_GEOMETRY_TYPES + ): return compute_affinity_in_time(geometry1, geometry2) shp1 = geometry_to_shapely(geometry1) @@ -107,8 +113,12 @@ def compute_affinity_in_time( start_time1, _, end_time1, _ = compute_bounds(geometry1) start_time2, _, end_time2, _ = compute_bounds(geometry2) - intersection = max(0, min(end_time1, end_time2) - max(start_time1, start_time2)) - union = (end_time1 - start_time1) + (end_time2 - start_time2) - intersection + intersection = max( + 0, min(end_time1, end_time2) - max(start_time1, start_time2) + ) + union = ( + (end_time1 - start_time1) + (end_time2 - start_time2) - intersection + ) if union == 0: return 0 diff --git a/src/soundevent/evaluation/encoding.py b/src/soundevent/evaluation/encoding.py index c5676ab..306555c 100644 --- a/src/soundevent/evaluation/encoding.py +++ b/src/soundevent/evaluation/encoding.py @@ -142,10 +142,10 @@ def classification_encoding( -------- Consider the following set of tags: - >>> dog = data.Tag(key='animal', value='dog') - >>> cat = data.Tag(key='animal', value='cat') - >>> brown = data.Tag(key='color', value='brown') - >>> blue = data.Tag(key='color', value='blue') + >>> dog = data.Tag(key="animal", value="dog") + >>> cat = 
data.Tag(key="animal", value="cat") + >>> brown = data.Tag(key="color", value="brown") + >>> blue = data.Tag(key="color", value="blue") If we are interested in encoding only the 'dog' and 'brown' classes, the following examples demonstrate how the encoding works for tag list: @@ -193,10 +193,10 @@ def multilabel_encoding( -------- Consider the following set of tags: - >>> dog = data.Tag(key='animal', value='dog') - >>> cat = data.Tag(key='animal', value='cat') - >>> brown = data.Tag(key='color', value='brown') - >>> blue = data.Tag(key='color', value='blue') + >>> dog = data.Tag(key="animal", value="dog") + >>> cat = data.Tag(key="animal", value="cat") + >>> brown = data.Tag(key="color", value="brown") + >>> blue = data.Tag(key="color", value="blue") And we are only interested in encoding the following two classes: @@ -248,10 +248,10 @@ class corresponding to the input predicted tags. -------- Consider the following set of tags: - >>> dog = data.Tag(key='animal', value='dog') - >>> cat = data.Tag(key='animal', value='cat') - >>> brown = data.Tag(key='color', value='brown') - >>> blue = data.Tag(key='color', value='blue') + >>> dog = data.Tag(key="animal", value="dog") + >>> cat = data.Tag(key="animal", value="cat") + >>> brown = data.Tag(key="color", value="brown") + >>> blue = data.Tag(key="color", value="blue") And we are only interested in encoding the following two classes: @@ -259,21 +259,32 @@ class corresponding to the input predicted tags. Then the following examples show how the encoding works for predicted tags: - >>> prediction_encoding([data.PredictedTag(tag=brown, score=0.5)], encoder) + >>> prediction_encoding( + ... [data.PredictedTag(tag=brown, score=0.5)], encoder + ... ) array([0, 0.5]) - >>> multilabel_encoding([ - ... data.PredictedTag(tag=dog, score=0.2), - ... data.PredictedTag(tag=blue, score=0.9), - ... ], encoder) + >>> multilabel_encoding( + ... [ + ... data.PredictedTag(tag=dog, score=0.2), + ... data.PredictedTag(tag=blue, score=0.9), + ... ], + ... encoder, + ... ) array([0.2, 0]) - >>> multilabel_encoding([ - ... data.PredictedTag(tag=dog, score=0.2), - ... data.PredictedTag(tag=brown, score=0.5), - ... ], encoder) + >>> multilabel_encoding( + ... [ + ... data.PredictedTag(tag=dog, score=0.2), + ... data.PredictedTag(tag=brown, score=0.5), + ... ], + ... encoder, + ... ) array([0.2, 0.5]) - >>> classification_encoding([ - ... data.PredictedTag(tag=cat, score=0.7), - ... ], encoder) + >>> classification_encoding( + ... [ + ... data.PredictedTag(tag=cat, score=0.7), + ... ], + ... encoder, + ... ) array([0, 0]) """ encoded = np.zeros(encoder.num_classes, dtype=np.float32) diff --git a/src/soundevent/evaluation/match.py b/src/soundevent/evaluation/match.py index 9344368..189e8cb 100644 --- a/src/soundevent/evaluation/match.py +++ b/src/soundevent/evaluation/match.py @@ -44,7 +44,6 @@ def match_geometries( is not matched to any source geometry, the source index is None. Every source and target geometry is matched exactly once. """ - # Compute the affinity between all pairs of geometries. 
cost_matrix = np.zeros(shape=(len(source), len(target))) for (index1, geometry1), (index2, geometry2) in product( diff --git a/src/soundevent/evaluation/metrics.py b/src/soundevent/evaluation/metrics.py index 22e3c71..c1b0cf6 100644 --- a/src/soundevent/evaluation/metrics.py +++ b/src/soundevent/evaluation/metrics.py @@ -43,7 +43,9 @@ def balanced_accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array([y if y is not None else num_classes for y in y_true]) + y_true_array = np.array( + [y if y is not None else num_classes for y in y_true] + ) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] y_pred = y_score.argmax(axis=1) return metrics.balanced_accuracy_score( @@ -57,7 +59,9 @@ def accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array([y if y is not None else num_classes for y in y_true]) + y_true_array = np.array( + [y if y is not None else num_classes for y in y_true] + ) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] y_pred = y_score.argmax(axis=1) return metrics.accuracy_score( # type: ignore @@ -71,7 +75,9 @@ def top_3_accuracy( y_score: np.ndarray, ) -> float: num_classes = y_score.shape[1] - y_true_array = np.array([y if y is not None else num_classes for y in y_true]) + y_true_array = np.array( + [y if y is not None else num_classes for y in y_true] + ) y_score = np.c_[y_score, 1 - y_score.sum(axis=1, keepdims=True)] return metrics.top_k_accuracy_score( # type: ignore y_true=y_true_array, @@ -128,7 +134,6 @@ def jaccard( for each class. This function will convert the probabilities to binary predictions using the given threshold. """ - if y_true.ndim == 1: y_true = y_true[np.newaxis, :] diff --git a/src/soundevent/evaluation/tasks/__init__.py b/src/soundevent/evaluation/tasks/__init__.py index 0d1b3b3..bc14174 100644 --- a/src/soundevent/evaluation/tasks/__init__.py +++ b/src/soundevent/evaluation/tasks/__init__.py @@ -5,7 +5,9 @@ from soundevent.evaluation.tasks.sound_event_classification import ( sound_event_classification, ) -from soundevent.evaluation.tasks.sound_event_detection import sound_event_detection +from soundevent.evaluation.tasks.sound_event_detection import ( + sound_event_detection, +) __all__ = [ "clip_classification", diff --git a/src/soundevent/evaluation/tasks/clip_classification.py b/src/soundevent/evaluation/tasks/clip_classification.py index aa509ea..d68441c 100644 --- a/src/soundevent/evaluation/tasks/clip_classification.py +++ b/src/soundevent/evaluation/tasks/clip_classification.py @@ -89,7 +89,8 @@ def _evaluate_all_clips( def _compute_overall_metrics(true_classes, predicted_classes_scores): """Compute evaluation metrics based on true classes and predicted - scores.""" + scores. 
+ """ evaluation_metrics = [ data.Feature( name=metric.__name__, @@ -164,6 +165,8 @@ def _compute_overall_score( evaluated_examples: Sequence[data.ClipEvaluation], ) -> float: non_none_scores = [ - example.score for example in evaluated_examples if example.score is not None + example.score + for example in evaluated_examples + if example.score is not None ] return float(np.mean(non_none_scores)) if non_none_scores else 0.0 diff --git a/src/soundevent/evaluation/tasks/clip_multilabel_classification.py b/src/soundevent/evaluation/tasks/clip_multilabel_classification.py index 6cc3249..7020b6e 100644 --- a/src/soundevent/evaluation/tasks/clip_multilabel_classification.py +++ b/src/soundevent/evaluation/tasks/clip_multilabel_classification.py @@ -98,7 +98,8 @@ def _compute_overall_metrics( predicted_classes_scores, ) -> List[data.Feature]: """Compute evaluation metrics based on true classes and predicted - scores.""" + scores. + """ return [ data.Feature( name=metric.__name__, @@ -166,6 +167,8 @@ def _compute_overall_score( evaluated_examples: Sequence[data.ClipEvaluation], ) -> float: valid_scores = [ - example.score for example in evaluated_examples if example.score is not None + example.score + for example in evaluated_examples + if example.score is not None ] return float(np.mean(valid_scores)) if valid_scores else 0.0 diff --git a/src/soundevent/evaluation/tasks/common.py b/src/soundevent/evaluation/tasks/common.py index 732f547..0e2cb58 100644 --- a/src/soundevent/evaluation/tasks/common.py +++ b/src/soundevent/evaluation/tasks/common.py @@ -7,7 +7,9 @@ def iterate_over_valid_clips( clip_predictions: Sequence[data.ClipPrediction], clip_annotations: Sequence[data.ClipAnnotation], ) -> Iterable[Tuple[data.ClipAnnotation, data.ClipPrediction]]: - annotated_clips = {example.clip.uuid: example for example in clip_annotations} + annotated_clips = { + example.clip.uuid: example for example in clip_annotations + } for predictions in clip_predictions: if predictions.clip.uuid in annotated_clips: diff --git a/src/soundevent/evaluation/tasks/sound_event_classification.py b/src/soundevent/evaluation/tasks/sound_event_classification.py index 029817c..9d7aba1 100644 --- a/src/soundevent/evaluation/tasks/sound_event_classification.py +++ b/src/soundevent/evaluation/tasks/sound_event_classification.py @@ -18,7 +18,9 @@ "sound_event_classification", ] -SOUNDEVENT_METRICS: Sequence[metrics.Metric] = (metrics.true_class_probability,) +SOUNDEVENT_METRICS: Sequence[metrics.Metric] = ( + metrics.true_class_probability, +) EXAMPLE_METRICS: Sequence[metrics.Metric] = () @@ -87,7 +89,8 @@ def _evaluate_clips( def _compute_overall_metrics(true_classes, predicted_classes_scores): """Compute evaluation metrics based on true classes and predicted - scores.""" + scores. 
+ """ evaluation_metrics = [ data.Feature( name=metric.__name__, @@ -119,7 +122,9 @@ def _evaluate_clip( if sound_event_prediction.sound_event.uuid not in _valid_sound_events: continue - annotation = _valid_sound_events[sound_event_prediction.sound_event.uuid] + annotation = _valid_sound_events[ + sound_event_prediction.sound_event.uuid + ] true_class, predicted_classes, match = _evaluate_sound_event( sound_event_prediction=sound_event_prediction, sound_event_annotation=annotation, @@ -130,7 +135,9 @@ def _evaluate_clip( predicted_classes_scores.append(predicted_classes) matches.append(match) - score = np.mean([match.score for match in matches if match.score is not None]) + score = np.mean( + [match.score for match in matches if match.score is not None] + ) return ( true_classes, @@ -187,6 +194,8 @@ def _compute_overall_score( evaluated_clip: Sequence[data.ClipEvaluation], ) -> float: non_none_scores = [ - example.score for example in evaluated_clip if example.score is not None + example.score + for example in evaluated_clip + if example.score is not None ] return float(np.mean(non_none_scores)) if non_none_scores else 0.0 diff --git a/src/soundevent/evaluation/tasks/sound_event_detection.py b/src/soundevent/evaluation/tasks/sound_event_detection.py index 09a3ba9..04c251e 100644 --- a/src/soundevent/evaluation/tasks/sound_event_detection.py +++ b/src/soundevent/evaluation/tasks/sound_event_detection.py @@ -20,7 +20,9 @@ "evaluate_clip", ] -SOUNDEVENT_METRICS: Sequence[metrics.Metric] = (metrics.true_class_probability,) +SOUNDEVENT_METRICS: Sequence[metrics.Metric] = ( + metrics.true_class_probability, +) EXAMPLE_METRICS: Sequence[metrics.Metric] = () @@ -87,7 +89,8 @@ def _evaluate_clips( def compute_overall_metrics(true_classes, predicted_classes_scores): """Compute evaluation metrics based on true classes and predicted - scores.""" + scores. 
+ """ evaluation_metrics = [ data.Feature( name=metric.__name__, diff --git a/src/soundevent/geometry/__init__.py b/src/soundevent/geometry/__init__.py index 79c7b10..7342d11 100644 --- a/src/soundevent/geometry/__init__.py +++ b/src/soundevent/geometry/__init__.py @@ -14,7 +14,10 @@ """ from soundevent.geometry.conversion import geometry_to_shapely -from soundevent.geometry.features import GeometricFeature, compute_geometric_features +from soundevent.geometry.features import ( + GeometricFeature, + compute_geometric_features, +) from soundevent.geometry.html import geometry_to_html from soundevent.geometry.operations import buffer_geometry, compute_bounds from soundevent.geometry.positions import get_geometry_point diff --git a/src/soundevent/geometry/features.py b/src/soundevent/geometry/features.py index 87b7dd3..a251bfe 100644 --- a/src/soundevent/geometry/features.py +++ b/src/soundevent/geometry/features.py @@ -154,7 +154,9 @@ def _compute_multi_point_features( Feature(name=GeometricFeature.LOW_FREQ, value=low_freq), Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq), Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq), - Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)), + Feature( + name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates) + ), ] @@ -169,7 +171,9 @@ def _compute_multi_linestring_features( Feature(name=GeometricFeature.LOW_FREQ, value=low_freq), Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq), Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq), - Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)), + Feature( + name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates) + ), ] @@ -184,11 +188,15 @@ def _compute_multi_polygon_features( Feature(name=GeometricFeature.LOW_FREQ, value=low_freq), Feature(name=GeometricFeature.HIGH_FREQ, value=high_freq), Feature(name=GeometricFeature.BANDWIDTH, value=high_freq - low_freq), - Feature(name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates)), + Feature( + name=GeometricFeature.NUM_SEGMENTS, value=len(geometry.coordinates) + ), ] -_COMPUTE_FEATURES: Dict[geometries.GeometryType, Callable[[Any], List[Feature]]] = { +_COMPUTE_FEATURES: Dict[ + geometries.GeometryType, Callable[[Any], List[Feature]] +] = { geometries.TimeStamp.geom_type(): _compute_time_stamp_features, geometries.TimeInterval.geom_type(): _compute_time_interval_features, geometries.BoundingBox.geom_type(): _compute_bounding_box_features, diff --git a/src/soundevent/geometry/html.py b/src/soundevent/geometry/html.py index 7554044..e705c85 100644 --- a/src/soundevent/geometry/html.py +++ b/src/soundevent/geometry/html.py @@ -105,7 +105,11 @@ def axis_label( inner_style = "; ".join( [ "display: inline", - ("vertical-align: top" if axis == "time" else "vertical-align: bottom"), + ( + "vertical-align: top" + if axis == "time" + else "vertical-align: bottom" + ), ] ) diff --git a/src/soundevent/io/aoef/__init__.py b/src/soundevent/io/aoef/__init__.py index 1aed5d9..08659c8 100644 --- a/src/soundevent/io/aoef/__init__.py +++ b/src/soundevent/io/aoef/__init__.py @@ -34,7 +34,10 @@ from soundevent import data from soundevent.io.types import DataCollections, DataType -from .annotation_project import AnnotationProjectAdapter, AnnotationProjectObject +from .annotation_project import ( + AnnotationProjectAdapter, + AnnotationProjectObject, +) from .annotation_set import AnnotationSetAdapter, AnnotationSetObject from .dataset import 
DatasetAdapter, DatasetObject from .evaluation import EvaluationAdapter, EvaluationObject @@ -84,7 +87,9 @@ class AOEFObject(BaseModel): """Schema definition for an AOEF object.""" version: str = AOEF_VERSION - created_on: datetime.datetime = Field(default_factory=datetime.datetime.now) + created_on: datetime.datetime = Field( + default_factory=datetime.datetime.now + ) data: Union[ EvaluationObject, DatasetObject, @@ -157,7 +162,9 @@ def load( if aoef_object.version != AOEF_VERSION: version = aoef_object.version - raise ValueError(f"Invalid AOEF version: {version} (expected {AOEF_VERSION})") + raise ValueError( + f"Invalid AOEF version: {version} (expected {AOEF_VERSION})" + ) return to_soundevent(aoef_object, audio_dir=audio_dir) diff --git a/src/soundevent/io/aoef/adapters.py b/src/soundevent/io/aoef/adapters.py index 1ea7920..ac21ca3 100644 --- a/src/soundevent/io/aoef/adapters.py +++ b/src/soundevent/io/aoef/adapters.py @@ -47,7 +47,9 @@ def to_aoef(self, obj: C) -> D: ... def to_soundevent(self, obj: D) -> C: ... -class DataAdapter(ABC, Generic[SoundEventObject, AOEFObject, SoundEventKey, AOEFKey]): +class DataAdapter( + ABC, Generic[SoundEventObject, AOEFObject, SoundEventKey, AOEFKey] +): """Base class for data adapters. A data adapter is used to convert between sound event and AOEF data @@ -64,7 +66,9 @@ def __init__(self): self._aoef_store: Dict[AOEFKey, AOEFObject] = {} @abstractmethod - def assemble_aoef(self, obj: SoundEventObject, obj_id: AOEFKey) -> AOEFObject: + def assemble_aoef( + self, obj: SoundEventObject, obj_id: AOEFKey + ) -> AOEFObject: """Create AOEF object from sound event object. Parameters diff --git a/src/soundevent/io/aoef/annotation_project.py b/src/soundevent/io/aoef/annotation_project.py index 5046f5e..ef630b9 100644 --- a/src/soundevent/io/aoef/annotation_project.py +++ b/src/soundevent/io/aoef/annotation_project.py @@ -26,18 +26,26 @@ def __init__( **kwargs, ): super().__init__(**kwargs) - self.annotation_task_adapter = annotation_task_adapter or AnnotationTaskAdapter( - self.clip_adapter, - self.user_adapter, + self.annotation_task_adapter = ( + annotation_task_adapter + or AnnotationTaskAdapter( + self.clip_adapter, + self.user_adapter, + ) ) def to_aoef( # type: ignore self, obj: data.AnnotationProject, # type: ignore ) -> AnnotationProjectObject: - tasks = [self.annotation_task_adapter.to_aoef(task) for task in obj.tasks or []] + tasks = [ + self.annotation_task_adapter.to_aoef(task) + for task in obj.tasks or [] + ] - project_tags = [self.tag_adapter.to_aoef(tag).id for tag in obj.annotation_tags] + project_tags = [ + self.tag_adapter.to_aoef(tag).id for tag in obj.annotation_tags + ] annotation_set = super().to_aoef(obj) @@ -67,11 +75,16 @@ def to_soundevent( # type: ignore annotation_set = super().to_soundevent(obj) tasks = [ - self.annotation_task_adapter.to_soundevent(task) for task in obj.tasks or [] + self.annotation_task_adapter.to_soundevent(task) + for task in obj.tasks or [] ] return data.AnnotationProject( - **{field: value for field, value in annotation_set if value is not None}, + **{ + field: value + for field, value in annotation_set + if value is not None + }, tasks=tasks, name=obj.name, description=obj.description, diff --git a/src/soundevent/io/aoef/annotation_set.py b/src/soundevent/io/aoef/annotation_set.py index 3feacb2..9ed43f6 100644 --- a/src/soundevent/io/aoef/annotation_set.py +++ b/src/soundevent/io/aoef/annotation_set.py @@ -11,7 +11,10 @@ from .note import NoteAdapter from .recording import RecordingAdapter, 
RecordingObject from .sequence import SequenceAdapter, SequenceObject -from .sequence_annotation import SequenceAnnotationAdapter, SequenceAnnotationObject +from .sequence_annotation import ( + SequenceAnnotationAdapter, + SequenceAnnotationObject, +) from .sound_event import SoundEventAdapter, SoundEventObject from .sound_event_annotation import ( SoundEventAnnotationAdapter, @@ -47,8 +50,12 @@ def __init__( sound_event_adapter: Optional[SoundEventAdapter] = None, sequence_adapter: Optional[SequenceAdapter] = None, clip_adapter: Optional[ClipAdapter] = None, - sound_event_annotations_adapter: Optional[SoundEventAnnotationAdapter] = None, - sequence_annotations_adapter: Optional[SequenceAnnotationAdapter] = None, + sound_event_annotations_adapter: Optional[ + SoundEventAnnotationAdapter + ] = None, + sequence_annotations_adapter: Optional[ + SequenceAnnotationAdapter + ] = None, clip_annotation_adapter: Optional[ClipAnnotationsAdapter] = None, ): self.user_adapter = user_adapter or UserAdapter() @@ -141,10 +148,14 @@ def to_soundevent( self.sequence_adapter.to_soundevent(sequence) for sound_event_annotation in obj.sound_event_annotations or []: - self.sound_event_annotations_adapter.to_soundevent(sound_event_annotation) + self.sound_event_annotations_adapter.to_soundevent( + sound_event_annotation + ) for sequence_annotation in obj.sequence_annotations or []: - self.sequence_annotations_adapter.to_soundevent(sequence_annotation) + self.sequence_annotations_adapter.to_soundevent( + sequence_annotation + ) annotated_clips = [ self.clip_annotation_adapter.to_soundevent(clip_annotation) diff --git a/src/soundevent/io/aoef/clip_annotations.py b/src/soundevent/io/aoef/clip_annotations.py index dc4cd19..c74cbe6 100644 --- a/src/soundevent/io/aoef/clip_annotations.py +++ b/src/soundevent/io/aoef/clip_annotations.py @@ -59,7 +59,9 @@ def assemble_aoef( ), sound_events=( [ - self.sound_event_annotation_adapter.to_aoef(annotation).uuid + self.sound_event_annotation_adapter.to_aoef( + annotation + ).uuid for annotation in obj.sound_events ] if obj.sound_events @@ -101,16 +103,25 @@ def assemble_soundevent( se_ann for annotation_id in obj.sound_events or [] if ( - se_ann := self.sound_event_annotation_adapter.from_id(annotation_id) + se_ann := self.sound_event_annotation_adapter.from_id( + annotation_id + ) ) is not None ], sequences=[ seq_ann for annotation_id in obj.sequences or [] - if (seq_ann := self.sequence_annotation_adapter.from_id(annotation_id)) + if ( + seq_ann := self.sequence_annotation_adapter.from_id( + annotation_id + ) + ) is not None ], - notes=[self.note_adapter.to_soundevent(note) for note in obj.notes or []], + notes=[ + self.note_adapter.to_soundevent(note) + for note in obj.notes or [] + ], created_on=obj.created_on or datetime.datetime.now(), ) diff --git a/src/soundevent/io/aoef/clip_evaluation.py b/src/soundevent/io/aoef/clip_evaluation.py index 73fd191..0cddee6 100644 --- a/src/soundevent/io/aoef/clip_evaluation.py +++ b/src/soundevent/io/aoef/clip_evaluation.py @@ -50,7 +50,10 @@ def assemble_aoef( annotations=annotations.uuid, predictions=predictions.uuid, matches=( - [self.match_adapter.to_aoef(match).uuid for match in obj.matches] + [ + self.match_adapter.to_aoef(match).uuid + for match in obj.matches + ] if obj.matches else None ), @@ -70,10 +73,14 @@ def assemble_soundevent( predictions = self.clip_predictions_adapter.from_id(obj.predictions) if annotations is None: - raise ValueError(f"Clip annotations with ID {obj.annotations} not found.") + raise ValueError( + 
f"Clip annotations with ID {obj.annotations} not found." + ) if predictions is None: - raise ValueError(f"Clip predictions with ID {obj.predictions} not found.") + raise ValueError( + f"Clip predictions with ID {obj.predictions} not found." + ) matches = [ match diff --git a/src/soundevent/io/aoef/clip_predictions.py b/src/soundevent/io/aoef/clip_predictions.py index cb0c358..df7dc2c 100644 --- a/src/soundevent/io/aoef/clip_predictions.py +++ b/src/soundevent/io/aoef/clip_predictions.py @@ -47,7 +47,9 @@ def assemble_aoef( clip=self.clip_adapter.to_aoef(obj.clip).uuid, sound_events=( [ - self.sound_event_prediction_adapter.to_aoef(sound_event).uuid + self.sound_event_prediction_adapter.to_aoef( + sound_event + ).uuid for sound_event in obj.sound_events ] if obj.sound_events @@ -65,7 +67,8 @@ def assemble_aoef( [ (tag.id, predicted_tag.score) for predicted_tag in obj.tags - if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None + if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) + is not None ] if obj.tags else None @@ -92,13 +95,21 @@ def assemble_soundevent( sound_events=[ se_pred for sound_event in obj.sound_events or [] - if (se_pred := self.sound_event_prediction_adapter.from_id(sound_event)) + if ( + se_pred := self.sound_event_prediction_adapter.from_id( + sound_event + ) + ) is not None ], sequences=[ seq_pred for sequence in obj.sequences or [] - if (seq_pred := self.sequence_prediction_adapter.from_id(sequence)) + if ( + seq_pred := self.sequence_prediction_adapter.from_id( + sequence + ) + ) is not None ], tags=[ diff --git a/src/soundevent/io/aoef/dataset.py b/src/soundevent/io/aoef/dataset.py index c62bc59..e667bf4 100644 --- a/src/soundevent/io/aoef/dataset.py +++ b/src/soundevent/io/aoef/dataset.py @@ -35,7 +35,9 @@ def to_soundevent( # type: ignore ) -> data.Dataset: recording_set = super().to_soundevent(obj) return data.Dataset( - **{key: value for key, value in recording_set if value is not None}, + **{ + key: value for key, value in recording_set if value is not None + }, name=obj.name, description=obj.description, ) diff --git a/src/soundevent/io/aoef/evaluation.py b/src/soundevent/io/aoef/evaluation.py index 91b2dfe..e53e41f 100644 --- a/src/soundevent/io/aoef/evaluation.py +++ b/src/soundevent/io/aoef/evaluation.py @@ -14,8 +14,14 @@ from .note import NoteAdapter from .recording import RecordingAdapter, RecordingObject from .sequence import SequenceAdapter, SequenceObject -from .sequence_annotation import SequenceAnnotationAdapter, SequenceAnnotationObject -from .sequence_prediction import SequencePredictionAdapter, SequencePredictionObject +from .sequence_annotation import ( + SequenceAnnotationAdapter, + SequenceAnnotationObject, +) +from .sequence_prediction import ( + SequencePredictionAdapter, + SequencePredictionObject, +) from .sound_event import SoundEventAdapter, SoundEventObject from .sound_event_annotation import ( SoundEventAnnotationAdapter, @@ -63,11 +69,19 @@ def __init__( sound_event_adapter: Optional[SoundEventAdapter] = None, sequence_adapter: Optional[SequenceAdapter] = None, clip_adapter: Optional[ClipAdapter] = None, - sound_event_annotation_adapter: Optional[SoundEventAnnotationAdapter] = None, - sequence_annotation_adapter: Optional[SequenceAnnotationAdapter] = None, + sound_event_annotation_adapter: Optional[ + SoundEventAnnotationAdapter + ] = None, + sequence_annotation_adapter: Optional[ + SequenceAnnotationAdapter + ] = None, clip_annotations_adapter: Optional[ClipAnnotationsAdapter] = None, - 
sound_event_prediction_adapter: Optional[SoundEventPredictionAdapter] = None, - sequence_prediction_adapter: Optional[SequencePredictionAdapter] = None, + sound_event_prediction_adapter: Optional[ + SoundEventPredictionAdapter + ] = None, + sequence_prediction_adapter: Optional[ + SequencePredictionAdapter + ] = None, clip_predictions_adapter: Optional[ClipPredictionsAdapter] = None, clip_evaluation_adapter: Optional[ClipEvaluationAdapter] = None, match_adapter: Optional[MatchAdapter] = None, @@ -144,11 +158,14 @@ def __init__( self.sound_event_annotation_adapter, self.sound_event_prediction_adapter, ) - self.clip_evaluation_adapter = clip_evaluation_adapter or ClipEvaluationAdapter( - self.clip_annotations_adapter, - self.clip_predictions_adapter, - self.note_adapter, - self.match_adapter, + self.clip_evaluation_adapter = ( + clip_evaluation_adapter + or ClipEvaluationAdapter( + self.clip_annotations_adapter, + self.clip_predictions_adapter, + self.note_adapter, + self.match_adapter, + ) ) def to_aoef(self, obj: data.Evaluation) -> EvaluationObject: @@ -208,7 +225,9 @@ def to_soundevent( self.clip_adapter.to_soundevent(clip) for sound_event_annotation in obj.sound_event_annotations or []: - self.sound_event_annotation_adapter.to_soundevent(sound_event_annotation) + self.sound_event_annotation_adapter.to_soundevent( + sound_event_annotation + ) for sequence_annotation in obj.sequence_annotations or []: self.sequence_annotation_adapter.to_soundevent(sequence_annotation) @@ -217,7 +236,9 @@ def to_soundevent( self.clip_annotations_adapter.to_soundevent(clip_annotation) for sound_event_prediction in obj.sound_event_predictions or []: - self.sound_event_prediction_adapter.to_soundevent(sound_event_prediction) + self.sound_event_prediction_adapter.to_soundevent( + sound_event_prediction + ) for sequence_prediction in obj.sequence_predictions or []: self.sequence_prediction_adapter.to_soundevent(sequence_prediction) diff --git a/src/soundevent/io/aoef/evaluation_set.py b/src/soundevent/io/aoef/evaluation_set.py index 0242ed2..53b9e9f 100644 --- a/src/soundevent/io/aoef/evaluation_set.py +++ b/src/soundevent/io/aoef/evaluation_set.py @@ -35,7 +35,10 @@ def to_aoef( # type: ignore name=obj.name, description=obj.description, evaluation_tags=( - [self.tag_adapter.to_aoef(tag).id for tag in obj.evaluation_tags] + [ + self.tag_adapter.to_aoef(tag).id + for tag in obj.evaluation_tags + ] if obj.evaluation_tags else None ), @@ -47,7 +50,11 @@ def to_soundevent( # type: ignore ) -> data.EvaluationSet: annotation_set = super().to_soundevent(obj) return data.EvaluationSet( - **{field: value for field, value in annotation_set if value is not None}, + **{ + field: value + for field, value in annotation_set + if value is not None + }, name=obj.name, description=obj.description, evaluation_tags=[ diff --git a/src/soundevent/io/aoef/match.py b/src/soundevent/io/aoef/match.py index 0b76dda..fcc2a9a 100644 --- a/src/soundevent/io/aoef/match.py +++ b/src/soundevent/io/aoef/match.py @@ -36,12 +36,16 @@ def assemble_aoef( ) -> MatchObject: source = None if obj.source is not None: - prediction = self.sound_event_prediction_adapter.to_aoef(obj.source) + prediction = self.sound_event_prediction_adapter.to_aoef( + obj.source + ) source = prediction.uuid if prediction is not None else None target = None if obj.target is not None: - annotation = self.sound_event_annotation_adapter.to_aoef(obj.target) + annotation = self.sound_event_annotation_adapter.to_aoef( + obj.target + ) target = annotation.uuid if annotation is not 
None else None return MatchObject( diff --git a/src/soundevent/io/aoef/prediction_set.py b/src/soundevent/io/aoef/prediction_set.py index 58246b3..2c55188 100644 --- a/src/soundevent/io/aoef/prediction_set.py +++ b/src/soundevent/io/aoef/prediction_set.py @@ -11,7 +11,10 @@ from .note import NoteAdapter from .recording import RecordingAdapter, RecordingObject from .sequence import SequenceAdapter, SequenceObject -from .sequence_prediction import SequencePredictionAdapter, SequencePredictionObject +from .sequence_prediction import ( + SequencePredictionAdapter, + SequencePredictionObject, +) from .sound_event import SoundEventAdapter, SoundEventObject from .sound_event_prediction import ( SoundEventPredictionAdapter, @@ -47,8 +50,12 @@ def __init__( sound_event_adapter: Optional[SoundEventAdapter] = None, sequence_adapter: Optional[SequenceAdapter] = None, clip_adapter: Optional[ClipAdapter] = None, - sound_event_prediction_adapter: Optional[SoundEventPredictionAdapter] = None, - sequence_prediction_adapter: Optional[SequencePredictionAdapter] = None, + sound_event_prediction_adapter: Optional[ + SoundEventPredictionAdapter + ] = None, + sequence_prediction_adapter: Optional[ + SequencePredictionAdapter + ] = None, clip_predictions_adapter: Optional[ClipPredictionsAdapter] = None, ): self.user_adapter = user_adapter or UserAdapter() @@ -129,7 +136,9 @@ def to_soundevent(self, obj: PredictionSetObject) -> data.PredictionSet: self.clip_adapter.to_soundevent(clip) for sound_event_prediction in obj.sound_event_predictions or []: - self.sound_event_prediction_adapter.to_soundevent(sound_event_prediction) + self.sound_event_prediction_adapter.to_soundevent( + sound_event_prediction + ) for sequence_prediction in obj.sequence_predictions or []: self.sequence_prediction_adapter.to_soundevent(sequence_prediction) diff --git a/src/soundevent/io/aoef/recording.py b/src/soundevent/io/aoef/recording.py index dbfc61a..59e9021 100644 --- a/src/soundevent/io/aoef/recording.py +++ b/src/soundevent/io/aoef/recording.py @@ -34,7 +34,9 @@ class RecordingObject(BaseModel): rights: Optional[str] = None -class RecordingAdapter(DataAdapter[data.Recording, RecordingObject, UUID, UUID]): +class RecordingAdapter( + DataAdapter[data.Recording, RecordingObject, UUID, UUID] +): def __init__( self, user_adapter: UserAdapter, @@ -57,7 +59,10 @@ def assemble_aoef( notes = [self._note_adapter.to_aoef(note) for note in obj.notes] - owners = [self._user_adapter.to_aoef(owner).uuid for owner in obj.owners or []] + owners = [ + self._user_adapter.to_aoef(owner).uuid + for owner in obj.owners or [] + ] path = obj.path if self.audio_dir is not None: @@ -69,7 +74,9 @@ def assemble_aoef( duration=obj.duration, channels=obj.channels, samplerate=obj.samplerate, - time_expansion=(obj.time_expansion if obj.time_expansion != 1.0 else None), + time_expansion=( + obj.time_expansion if obj.time_expansion != 1.0 else None + ), hash=obj.hash, date=obj.date, time=obj.time, @@ -93,7 +100,10 @@ def assemble_soundevent(self, obj: RecordingObject) -> data.Recording: if (tag := self._tag_adapter.from_id(tag_id)) is not None ] - notes = [self._note_adapter.to_soundevent(note) for note in (obj.notes or [])] + notes = [ + self._note_adapter.to_soundevent(note) + for note in (obj.notes or []) + ] owners = [ user diff --git a/src/soundevent/io/aoef/recording_set.py b/src/soundevent/io/aoef/recording_set.py index 3707d6d..3e7c95a 100644 --- a/src/soundevent/io/aoef/recording_set.py +++ b/src/soundevent/io/aoef/recording_set.py @@ -47,7 +47,8 @@ def 
to_aoef( obj: data.RecordingSet, ) -> RecordingSetObject: recording_objects = [ - self.recording_adapter.to_aoef(recording) for recording in obj.recordings + self.recording_adapter.to_aoef(recording) + for recording in obj.recordings ] return RecordingSetObject( uuid=obj.uuid, diff --git a/src/soundevent/io/aoef/sequence.py b/src/soundevent/io/aoef/sequence.py index ec624b2..2c1e53e 100644 --- a/src/soundevent/io/aoef/sequence.py +++ b/src/soundevent/io/aoef/sequence.py @@ -28,7 +28,9 @@ def __init__( super().__init__() self.soundevent_adapter = soundevent_adapter - def assemble_aoef(self, obj: data.Sequence, obj_id: UUID) -> SequenceObject: + def assemble_aoef( + self, obj: data.Sequence, obj_id: UUID + ) -> SequenceObject: parent = None if obj.parent: parent = self.to_aoef(obj.parent).uuid diff --git a/src/soundevent/io/aoef/sequence_prediction.py b/src/soundevent/io/aoef/sequence_prediction.py index 1716d7d..636f3df 100644 --- a/src/soundevent/io/aoef/sequence_prediction.py +++ b/src/soundevent/io/aoef/sequence_prediction.py @@ -42,7 +42,8 @@ def assemble_aoef( [ (tag.id, predicted_tag.score) for predicted_tag in obj.tags - if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None + if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) + is not None ] if obj.tags else None diff --git a/src/soundevent/io/aoef/sound_event.py b/src/soundevent/io/aoef/sound_event.py index 424a20c..9e5a175 100644 --- a/src/soundevent/io/aoef/sound_event.py +++ b/src/soundevent/io/aoef/sound_event.py @@ -18,7 +18,9 @@ class SoundEventObject(BaseModel): features: Optional[Dict[str, float]] = None -class SoundEventAdapter(DataAdapter[data.SoundEvent, SoundEventObject, UUID, UUID]): +class SoundEventAdapter( + DataAdapter[data.SoundEvent, SoundEventObject, UUID, UUID] +): def __init__( self, recording_adapter: RecordingAdapter, diff --git a/src/soundevent/io/aoef/sound_event_annotation.py b/src/soundevent/io/aoef/sound_event_annotation.py index 67733d4..afa07c3 100644 --- a/src/soundevent/io/aoef/sound_event_annotation.py +++ b/src/soundevent/io/aoef/sound_event_annotation.py @@ -22,7 +22,9 @@ class SoundEventAnnotationObject(BaseModel): class SoundEventAnnotationAdapter( - DataAdapter[data.SoundEventAnnotation, SoundEventAnnotationObject, UUID, UUID] + DataAdapter[ + data.SoundEventAnnotation, SoundEventAnnotationObject, UUID, UUID + ] ): def __init__( self, @@ -66,7 +68,9 @@ def assemble_soundevent( sound_event = self.sound_event_adapter.from_id(obj.sound_event) if sound_event is None: - raise ValueError(f"Sound event with ID {obj.sound_event} not found.") + raise ValueError( + f"Sound event with ID {obj.sound_event} not found." 
+            )
 
         return data.SoundEventAnnotation(
             uuid=obj.uuid,
diff --git a/src/soundevent/io/aoef/sound_event_prediction.py b/src/soundevent/io/aoef/sound_event_prediction.py
index 007f4ae..175e06a 100644
--- a/src/soundevent/io/aoef/sound_event_prediction.py
+++ b/src/soundevent/io/aoef/sound_event_prediction.py
@@ -18,7 +18,9 @@ class SoundEventPredictionObject(BaseModel):
 
 
 class SoundEventPredictionAdapter(
-    DataAdapter[data.SoundEventPrediction, SoundEventPredictionObject, UUID, UUID]
+    DataAdapter[
+        data.SoundEventPrediction, SoundEventPredictionObject, UUID, UUID
+    ]
 ):
     def __init__(
         self,
@@ -42,7 +44,8 @@ def assemble_aoef(
             [
                 (tag.id, predicted_tag.score)
                 for predicted_tag in obj.tags
-                if (tag := self.tag_adapter.to_aoef(predicted_tag.tag)) is not None
+                if (tag := self.tag_adapter.to_aoef(predicted_tag.tag))
+                is not None
             ]
             if obj.tags
             else None
@@ -56,7 +59,9 @@ def assemble_soundevent(
         sound_event = self.sound_event_adapter.from_id(obj.sound_event)
 
         if sound_event is None:
-            raise ValueError(f"Sound event with ID {obj.sound_event} not found.")
+            raise ValueError(
+                f"Sound event with ID {obj.sound_event} not found."
+            )
 
         return data.SoundEventPrediction(
             uuid=obj.uuid or uuid4(),
diff --git a/src/soundevent/io/crowsetta/__init__.py b/src/soundevent/io/crowsetta/__init__.py
index 63fc6a2..a933a8f 100644
--- a/src/soundevent/io/crowsetta/__init__.py
+++ b/src/soundevent/io/crowsetta/__init__.py
@@ -8,7 +8,10 @@
     annotation_from_clip_annotation,
     annotation_to_clip_annotation,
 )
-from soundevent.io.crowsetta.bbox import bbox_from_annotation, bbox_to_annotation
+from soundevent.io.crowsetta.bbox import (
+    bbox_from_annotation,
+    bbox_to_annotation,
+)
 from soundevent.io.crowsetta.labels import (
     label_from_tag,
     label_from_tags,
diff --git a/src/soundevent/io/crowsetta/annotation.py b/src/soundevent/io/crowsetta/annotation.py
index 6a8e91e..7af1372 100644
--- a/src/soundevent/io/crowsetta/annotation.py
+++ b/src/soundevent/io/crowsetta/annotation.py
@@ -5,7 +5,10 @@
 import crowsetta
 
 from soundevent import data
-from soundevent.io.crowsetta.bbox import bbox_from_annotation, bbox_to_annotation
+from soundevent.io.crowsetta.bbox import (
+    bbox_from_annotation,
+    bbox_to_annotation,
+)
 from soundevent.io.crowsetta.sequence import (
     sequence_from_annotations,
     sequence_to_annotations,
@@ -85,7 +88,8 @@ def annotation_from_clip_annotation(
 
     if annotation_fmt != "seq":
         raise ValueError(
-            "annotation_fmt must be either 'bbox' or 'seq', " f"not {annotation_fmt}."
+            "annotation_fmt must be either 'bbox' or 'seq', "
+            f"not {annotation_fmt}."
         )
 
     return crowsetta.Annotation(
@@ -148,7 +152,6 @@ def annotation_to_clip_annotation(
     data.ClipAnnotation
        A ClipAnnotation representing the converted Crowsetta annotation.
    """
-
    if tags is None:
        tags = []
 
@@ -172,7 +175,8 @@ def annotation_to_clip_annotation(
 
     if path is not None and path != recording.path:
         raise ValueError(
-            "The path of the annotation does not match the path of the " "recording."
+            "The path of the annotation does not match the path of the "
+            "recording."
         )
 
     sound_event_annotations = []
@@ -190,9 +194,9 @@ def annotation_to_clip_annotation(
             )
         )
 
-    crowsetta_sequences: Union[List[crowsetta.Sequence], crowsetta.Sequence] = getattr(
-        annot, "seq", []
-    )
+    crowsetta_sequences: Union[
+        List[crowsetta.Sequence], crowsetta.Sequence
+    ] = getattr(annot, "seq", [])
 
     if not isinstance(crowsetta_sequences, list):
         crowsetta_sequences = [crowsetta_sequences]
diff --git a/src/soundevent/io/crowsetta/bbox.py b/src/soundevent/io/crowsetta/bbox.py
index 3854a9c..78e6a98 100644
--- a/src/soundevent/io/crowsetta/bbox.py
+++ b/src/soundevent/io/crowsetta/bbox.py
@@ -23,7 +23,10 @@ def convert_geometry_to_bbox(
             "because the sound event geometry is not a BoundingBox."
         )
 
-    if geometry.type in ["TimeInterval", "TimeStamp"] and raise_on_time_geometries:
+    if (
+        geometry.type in ["TimeInterval", "TimeStamp"]
+        and raise_on_time_geometries
+    ):
         raise ValueError(
             "Cannot convert to a crowsetta bbox because "
             "the sound event geometry is a TimeInterval or TimeStamp "
@@ -159,7 +162,9 @@ def bbox_to_annotation(
         low_freq = low_freq * recording.time_expansion
         high_freq = high_freq * recording.time_expansion
 
-    geometry = data.BoundingBox(coordinates=[start_time, low_freq, end_time, high_freq])
+    geometry = data.BoundingBox(
+        coordinates=[start_time, low_freq, end_time, high_freq]
+    )
 
     tags = label_to_tags(bbox.label, **kwargs)
 
diff --git a/src/soundevent/io/crowsetta/labels.py b/src/soundevent/io/crowsetta/labels.py
index 832d4fb..546d3a5 100644
--- a/src/soundevent/io/crowsetta/labels.py
+++ b/src/soundevent/io/crowsetta/labels.py
@@ -9,7 +9,7 @@
 customize the conversion process using various options.
 """
 
-from typing import Callable, List, Optional, Sequence, Union
+from typing import Callable, Dict, List, Optional, Sequence, Union
 
 from soundevent import data
 
@@ -30,7 +30,7 @@ def label_to_tags(
     label: str,
     tag_fn: Optional[LabelToTagFn] = None,
     tag_mapping: Optional[LabelToTagMap] = None,
-    key_mapping: Optional[dict[str, str]] = None,
+    key_mapping: Optional[Dict[str, str]] = None,
     key: Optional[str] = None,
     fallback: str = "crowsetta",
     empty_labels: Sequence[str] = (EMPTY_LABEL,),
diff --git a/src/soundevent/io/crowsetta/segment.py b/src/soundevent/io/crowsetta/segment.py
index 259cf3d..64ab9f7 100644
--- a/src/soundevent/io/crowsetta/segment.py
+++ b/src/soundevent/io/crowsetta/segment.py
@@ -1,6 +1,6 @@
 """crowsetta.segment module."""
 
-from typing import List, Optional
+from typing import List, Optional, Tuple
 
 import crowsetta
 
@@ -17,7 +17,7 @@
 def convert_geometry_to_interval(
     geometry: data.Geometry,
     cast_to_segment: bool = False,
-) -> tuple[float, float]:
+) -> Tuple[float, float]:
     if geometry.type != "TimeInterval":
         if not cast_to_segment:
             raise ValueError(
@@ -176,7 +176,6 @@ def segment_to_annotation(
         containing a SoundEvent with the time interval, associated tags,
         notes, and creator information.
     """
-
     if notes is None:
         notes = []
diff --git a/src/soundevent/io/crowsetta/sequence.py b/src/soundevent/io/crowsetta/sequence.py
index 0dfd6f4..57c43d0 100644
--- a/src/soundevent/io/crowsetta/sequence.py
+++ b/src/soundevent/io/crowsetta/sequence.py
@@ -52,7 +52,6 @@ def sequence_from_annotations(
     ValueError
         If an annotation cannot be converted and `ignore_errors` is False.
""" - segments = [] for annotation in annotations: diff --git a/src/soundevent/io/formats.py b/src/soundevent/io/formats.py index a23ae6e..8f90b14 100644 --- a/src/soundevent/io/formats.py +++ b/src/soundevent/io/formats.py @@ -36,4 +36,6 @@ def infer_format(path: PathLike) -> str: if inferrer(path): return format_ - raise ValueError(f"Cannot infer format of file {path}, or format not supported.") + raise ValueError( + f"Cannot infer format of file {path}, or format not supported." + ) diff --git a/src/soundevent/io/saver.py b/src/soundevent/io/saver.py index 9de0ba5..22b088a 100644 --- a/src/soundevent/io/saver.py +++ b/src/soundevent/io/saver.py @@ -42,7 +42,6 @@ def save( Format to save the data in. If `None`, the format will be inferred from the file extension. """ - if format is None: format = infer_format(path) diff --git a/src/soundevent/plot/annotation.py b/src/soundevent/plot/annotation.py index fc17e24..0998aba 100644 --- a/src/soundevent/plot/annotation.py +++ b/src/soundevent/plot/annotation.py @@ -21,7 +21,6 @@ def plot_annotation( **kwargs, ) -> Axes: """Plot an annotation.""" - geometry = annotation.sound_event.geometry if geometry is None: @@ -70,12 +69,12 @@ def get_tags_position( float Frequency position for tag plotting in Hertz. """ - func = _TAG_POSITION_FUNCTIONS.get(geometry.type, None) if func is None: raise NotImplementedError( - f"Plotting tags for geometry of type {geometry.type} " "is not implemented." + f"Plotting tags for geometry of type {geometry.type} " + "is not implemented." ) return func(geometry, bounds) @@ -116,7 +115,9 @@ def _get_tags_position_bounding_box( _TAG_POSITION_FUNCTIONS: Dict[ data.GeometryType, - Callable[[data.Geometry, Tuple[float, float, float, float]], Tuple[float, float]], + Callable[ + [data.Geometry, Tuple[float, float, float, float]], Tuple[float, float] + ], ] = { data.BoundingBox.geom_type(): _get_tags_position_bounding_box, } diff --git a/src/soundevent/plot/geometries.py b/src/soundevent/plot/geometries.py index 607c0af..f595ff2 100644 --- a/src/soundevent/plot/geometries.py +++ b/src/soundevent/plot/geometries.py @@ -58,7 +58,8 @@ def _plot_bounding_box_geometry( ) -> Axes: if not isinstance(geometry, data.BoundingBox): raise ValueError( - f"Expected geometry of type {data.BoundingBox}, " f"got {type(geometry)}." + f"Expected geometry of type {data.BoundingBox}, " + f"got {type(geometry)}." 
         )
 
     start_time, low_freq, end_time, high_freq = geometry.coordinates
diff --git a/src/soundevent/plot/tags.py b/src/soundevent/plot/tags.py
index 96581a8..73cc25b 100644
--- a/src/soundevent/plot/tags.py
+++ b/src/soundevent/plot/tags.py
@@ -29,7 +29,9 @@ def __init__(
         self._tags: Dict[data.Tag, str] = {}
 
         colormap = get_cmap(cmap)
-        self._colors = cycle([colormap(x) for x in np.linspace(0, 1, num_colors)])
+        self._colors = cycle(
+            [colormap(x) for x in np.linspace(0, 1, num_colors)]
+        )
 
     def get_color(self, tag: data.Tag) -> str:
         """Get color for tag."""
diff --git a/tests/conftest.py b/tests/conftest.py
index 1440eca..4321d51 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,7 +10,6 @@
 import numpy as np
 import pytest
 import soundfile as sf
-
 from soundevent import data
 
 if sys.version_info < (3, 9):
diff --git a/tests/test_array/test_dimensions.py b/tests/test_array/test_dimensions.py
index 598d295..ab31c39 100644
--- a/tests/test_array/test_dimensions.py
+++ b/tests/test_array/test_dimensions.py
@@ -183,7 +183,9 @@ def test_create_frequency_dim_from_array_sets_attrs():
 def test_create_frequency_dim_from_array_estimates_step():
     """Test create_frequency_dim_from_array function."""
     arr = np.array([1, 2, 3])
-    frequency_dim = arrays.create_frequency_dim_from_array(arr, estimate_step=True)
+    frequency_dim = arrays.create_frequency_dim_from_array(
+        arr, estimate_step=True
+    )
     assert frequency_dim.attrs["step"] == 1
diff --git a/tests/test_array/test_operations.py b/tests/test_array/test_operations.py
index b64e2dd..b931fe5 100644
--- a/tests/test_array/test_operations.py
+++ b/tests/test_array/test_operations.py
@@ -1,7 +1,7 @@
 """Test suite for the soundevent.arrays.operations module."""
 
-import pytest
 import numpy as np
+import pytest
 import xarray as xr
 
 from soundevent.arrays import operations as ops
diff --git a/tests/test_audio/test_filter.py b/tests/test_audio/test_filter.py
index f43c912..5d488bb 100644
--- a/tests/test_audio/test_filter.py
+++ b/tests/test_audio/test_filter.py
@@ -6,7 +6,6 @@
 import pytest
 import xarray as xr
 from scipy import signal
-
 from soundevent import audio
diff --git a/tests/test_audio/test_io.py b/tests/test_audio/test_io.py
index 460e3b5..6bcbac4 100644
--- a/tests/test_audio/test_io.py
+++ b/tests/test_audio/test_io.py
@@ -1,9 +1,8 @@
-from typing import Optional
 from pathlib import Path
+from typing import Optional
 
 import numpy as np
 import pytest
-
 from soundevent.audio.io import audio_to_bytes, load_audio
 
@@ -27,7 +26,9 @@ def test_audio_to_bytes_has_correct_length(
     dtype: np.dtype,
 ):
     samples = int(duration * samplerate)
-    array = np.random.random(size=[int(duration * samplerate), channels]).astype(dtype)
+    array = np.random.random(
+        size=[int(duration * samplerate), channels]
+    ).astype(dtype)
 
     bytes_per_sample = (bit_depth // 8) * channels
     expected_bytes = samples * bytes_per_sample
diff --git a/tests/test_audio/test_raw.py b/tests/test_audio/test_raw.py
index 5faa871..6abe601 100644
--- a/tests/test_audio/test_raw.py
+++ b/tests/test_audio/test_raw.py
@@ -1,7 +1,6 @@
 """Test suite for the RawData class."""
 
 import soundfile as sf
-
 from soundevent.audio.chunks import parse_into_chunks
 from soundevent.audio.raw import RawData
diff --git a/tests/test_audio/test_resample.py b/tests/test_audio/test_resample.py
index 413176b..97c94e9 100644
--- a/tests/test_audio/test_resample.py
+++ b/tests/test_audio/test_resample.py
@@ -3,7 +3,6 @@
 import numpy as np
 import pytest
 import xarray as xr
-
 from soundevent import audio
diff --git a/tests/test_audio/test_scaling.py b/tests/test_audio/test_scaling.py
index 0f3e40c..4b684ae 100644
--- a/tests/test_audio/test_scaling.py
+++ b/tests/test_audio/test_scaling.py
@@ -3,7 +3,6 @@
 import numpy as np
 import pytest
 import xarray as xr
-
 from soundevent import data
 from soundevent.audio import (
     clamp_amplitude,
diff --git a/tests/test_data/test_datasets.py b/tests/test_data/test_datasets.py
index a72938e..66e607e 100644
--- a/tests/test_data/test_datasets.py
+++ b/tests/test_data/test_datasets.py
@@ -3,7 +3,6 @@
 from pathlib import Path
 
 import pytest
-
 from soundevent import data
 
@@ -52,7 +51,9 @@ def test_create_dataset_ignores_non_audio_files(tmp_path: Path):
 def test_create_dataset_fails_with_non_existing_directory():
     """Test that we can create a dataset from audio files."""
     with pytest.raises(ValueError):
-        data.Dataset.from_directory(Path("non-existing-directory"), name="test")
+        data.Dataset.from_directory(
+            Path("non-existing-directory"), name="test"
+        )
 
 
 def test_create_dataset_fails_if_path_is_file(tmp_path: Path):
@@ -76,7 +77,9 @@ def test_create_dataset_without_recursive(tmp_path: Path, random_wav):
     """Test that we can create a dataset from audio files."""
     (tmp_path / "test1").mkdir()
     random_wav(path=tmp_path / "test1" / "test1.wav")
-    dataset = data.Dataset.from_directory(tmp_path, recursive=False, name="test")
+    dataset = data.Dataset.from_directory(
+        tmp_path, recursive=False, name="test"
+    )
 
     assert len(dataset.recordings) == 0
diff --git a/tests/test_data/test_evaluated_samples.py b/tests/test_data/test_evaluated_samples.py
index 00e197b..0789d24 100644
--- a/tests/test_data/test_evaluated_samples.py
+++ b/tests/test_data/test_evaluated_samples.py
@@ -2,7 +2,6 @@
 
 import pytest
 from pydantic import ValidationError
-
 from soundevent import data
diff --git a/tests/test_data/test_geometry.py b/tests/test_data/test_geometry.py
index ac0e45e..cbad0d9 100644
--- a/tests/test_data/test_geometry.py
+++ b/tests/test_data/test_geometry.py
@@ -5,7 +5,6 @@
 from typing import List
 
 import pytest
-
 from soundevent import data
 
@@ -184,7 +183,9 @@ def test_load_multilinestring_from_dict():
 
 def test_load_multilinestring_from_attributes():
     """Test that a MultiLineString can be loaded from attributes."""
-    obj = data.MultiLineString(coordinates=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
+    obj = data.MultiLineString(
+        coordinates=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
+    )
     geom = data.geometry_validate(obj, mode="attributes")
     assert isinstance(geom, data.MultiLineString)
     assert geom.coordinates == [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
@@ -278,7 +279,6 @@ def test_invalid_time_interval_fails():
 
 def test_invalid_bounding_box_fails():
     """Test that an invalid bounds fails."""
-
     # No negative time
     with pytest.raises(ValueError):
         data.BoundingBox(coordinates=[-1, 0, 0, 1])
diff --git a/tests/test_evaluation/test_clip_classification.py b/tests/test_evaluation/test_clip_classification.py
index 48790e4..11523dd 100644
--- a/tests/test_evaluation/test_clip_classification.py
+++ b/tests/test_evaluation/test_clip_classification.py
@@ -4,7 +4,6 @@
 from typing import List
 
 import pytest
-
 from soundevent import data
 from soundevent.evaluation import clip_classification
 
@@ -156,7 +155,9 @@ def test_evaluation_has_balanced_accuracy(
         tags=evaluation_tags,
     )
 
-    balanced_accuracy = data.find_feature(evaluation.metrics, name="balanced_accuracy")
+    balanced_accuracy = data.find_feature(
+        evaluation.metrics, name="balanced_accuracy"
+    )
     assert balanced_accuracy is not None
     assert math.isclose(balanced_accuracy.value, 0.5, rel_tol=1e-6)
 
@@ -173,7 +174,9 @@ def test_evaluation_has_top_3_accuracy(
         tags=evaluation_tags,
     )
 
-    top_3_accuracy = data.find_feature(evaluation.metrics, name="top_3_accuracy")
+    top_3_accuracy = data.find_feature(
+        evaluation.metrics, name="top_3_accuracy"
+    )
     assert top_3_accuracy is not None
     assert math.isclose(top_3_accuracy.value, 1.0, rel_tol=1e-6)
 
@@ -212,7 +215,8 @@ def test_overall_score_is_the_mean_of_the_scores_of_all_evaluated_clips(
     evaluation_tags: List[data.Tag],
 ):
     """Test that the overall score is the mean of the scores of all evaluated
-    examples."""
+    examples.
+    """
     evaluation = clip_classification(
         clip_annotations=annotation_set.clip_annotations,
         clip_predictions=prediction_set.clip_predictions,
@@ -240,7 +244,11 @@ def test_each_example_score_is_the_probability_of_the_true_class(
     assert len(evaluation.clip_evaluations[1].metrics) == 1
 
     assert evaluation.clip_evaluations[0].score is not None
-    assert math.isclose(evaluation.clip_evaluations[0].score, 0.9, rel_tol=1e-6)
+    assert math.isclose(
+        evaluation.clip_evaluations[0].score, 0.9, rel_tol=1e-6
+    )
 
     assert evaluation.clip_evaluations[1].score is not None
-    assert math.isclose(evaluation.clip_evaluations[1].score, 0.1, rel_tol=1e-6)
+    assert math.isclose(
+        evaluation.clip_evaluations[1].score, 0.1, rel_tol=1e-6
+    )
diff --git a/tests/test_evaluation/test_clip_multilabel_classification.py b/tests/test_evaluation/test_clip_multilabel_classification.py
index 6b74fcb..b22c627 100644
--- a/tests/test_evaluation/test_clip_multilabel_classification.py
+++ b/tests/test_evaluation/test_clip_multilabel_classification.py
@@ -3,7 +3,6 @@
 from typing import List
 
 import pytest
-
 from soundevent import data
 from soundevent.evaluation import clip_multilabel_classification
diff --git a/tests/test_evaluation/test_encode.py b/tests/test_evaluation/test_encode.py
index 1d7bb50..0bcadb5 100644
--- a/tests/test_evaluation/test_encode.py
+++ b/tests/test_evaluation/test_encode.py
@@ -4,7 +4,6 @@
 
 import numpy as np
 import pytest
-
 from soundevent import data
 from soundevent.evaluation import (
     classification_encoding,
@@ -16,7 +15,9 @@
 
 @pytest.fixture
-def tags(random_tags: Callable[[int], Sequence[data.Tag]]) -> Sequence[data.Tag]:
+def tags(
+    random_tags: Callable[[int], Sequence[data.Tag]],
+) -> Sequence[data.Tag]:
     """Tags for testing."""
     return random_tags(10)
 
@@ -35,7 +36,6 @@ def test_classification_encoding(
     encoder: Encoder,
 ):
     """Test encoding objects with tags."""
-
     encoded = classification_encoding(
         tags=[tags[3]],
         encoder=encoder,
diff --git a/tests/test_evaluation/test_matching.py b/tests/test_evaluation/test_matching.py
index 8ce744d..0c858da 100644
--- a/tests/test_evaluation/test_matching.py
+++ b/tests/test_evaluation/test_matching.py
@@ -96,7 +96,9 @@ def test_multi_linestring_is_supported():
 
 
 def test_multi_polygon_is_supported():
-    multi_polygon = data.MultiPolygon(coordinates=[[[[1, 2], [4, 3], [5, 6], [1, 2]]]])
+    multi_polygon = data.MultiPolygon(
+        coordinates=[[[[1, 2], [4, 3], [5, 6], [1, 2]]]]
+    )
     matches = list(match_geometries([multi_polygon], [multi_polygon]))
     assert len(matches) == 1
     source_index, target_index, affinity = matches[0]
diff --git a/tests/test_evaluation/test_metrics.py b/tests/test_evaluation/test_metrics.py
index 3597d27..c447d2f 100644
--- a/tests/test_evaluation/test_metrics.py
+++ b/tests/test_evaluation/test_metrics.py
@@ -1,7 +1,6 @@
 """Test suite for soundevent.evaluation.metrics.py."""
 
 import numpy as np
-
 from soundevent.evaluation import metrics
diff --git a/tests/test_evaluation/test_sound_event_detection.py b/tests/test_evaluation/test_sound_event_detection.py
index a798832..09080e5 100644
--- a/tests/test_evaluation/test_sound_event_detection.py
+++ b/tests/test_evaluation/test_sound_event_detection.py
@@ -28,7 +28,9 @@ def test_can_evaluate_nips_data():
     assert isinstance(evaluation, data.Evaluation)
 
     # check that all clips have been evaluated
-    assert len(evaluation.clip_evaluations) == len(evaluation_set.clip_annotations)
+    assert len(evaluation.clip_evaluations) == len(
+        evaluation_set.clip_annotations
+    )
 
     # check that all metrics are present
     assert len(evaluation.metrics) == 4
diff --git a/tests/test_geometry/conftest.py b/tests/test_geometry/conftest.py
index d28a055..1d69a60 100644
--- a/tests/test_geometry/conftest.py
+++ b/tests/test_geometry/conftest.py
@@ -1,7 +1,6 @@
 """Common fixtures for testing geometry functions."""
 
 import pytest
-
 from soundevent import data
diff --git a/tests/test_geometry/test_conversion.py b/tests/test_geometry/test_conversion.py
index a53398a..0ff44e4 100644
--- a/tests/test_geometry/test_conversion.py
+++ b/tests/test_geometry/test_conversion.py
@@ -1,7 +1,6 @@
 """Test Suite for geometry conversion functions."""
 
 import shapely
-
 from soundevent import data, geometry
diff --git a/tests/test_geometry/test_html.py b/tests/test_geometry/test_html.py
index dd019cf..39d217a 100644
--- a/tests/test_geometry/test_html.py
+++ b/tests/test_geometry/test_html.py
@@ -1,7 +1,6 @@
 """Test that geometries get converted to HTML."""
 
 import html5lib
-
 from soundevent import data
 from soundevent.geometry.html import geometry_to_html
diff --git a/tests/test_geometry/test_operations.py b/tests/test_geometry/test_operations.py
index e400214..c555aff 100644
--- a/tests/test_geometry/test_operations.py
+++ b/tests/test_geometry/test_operations.py
@@ -4,7 +4,6 @@
 from typing import List
 
 import pytest
-
 from soundevent import data
 from soundevent.data.geometries import BaseGeometry
 from soundevent.geometry.operations import buffer_geometry, compute_bounds
diff --git a/tests/test_io/conftest.py b/tests/test_io/conftest.py
index 725e532..f1de546 100644
--- a/tests/test_io/conftest.py
+++ b/tests/test_io/conftest.py
@@ -3,7 +3,6 @@
 from typing import Callable, List
 
 import pytest
-
 from soundevent import data
diff --git a/tests/test_io/test_annotation_projects.py b/tests/test_io/test_annotation_projects.py
index 580b487..85a6874 100644
--- a/tests/test_io/test_annotation_projects.py
+++ b/tests/test_io/test_annotation_projects.py
@@ -40,7 +40,9 @@ def test_saved_annotation_project_is_saved_to_json_file(
     assert path.exists()
 
 
-def test_saved_annotation_project_has_correct_info(monkeypatch, tmp_path: Path) -> None:
+def test_saved_annotation_project_has_correct_info(
+    monkeypatch, tmp_path: Path
+) -> None:
     """Test that the saved annotation project has the correct info."""
     # Arrange
     annotation_project = data.AnnotationProject(
@@ -173,7 +175,10 @@ def test_can_recover_task_status(
 
     # Assert
     assert recovered == annotation_project
-    assert recovered.tasks[0].status_badges[0].state == data.AnnotationState.completed
+    assert (
+        recovered.tasks[0].status_badges[0].state
+        == data.AnnotationState.completed
+    )
 
 
 def test_can_recover_user_that_completed_task(
@@ -280,7 +285,9 @@ def test_can_recover_task_simple_annotation(
         clip_annotations=[
             data.ClipAnnotation(
                 clip=clip,
-                sound_events=[data.SoundEventAnnotation(sound_event=sound_event)],
+                sound_events=[
+                    data.SoundEventAnnotation(sound_event=sound_event)
+                ],
             )
         ],
         tasks=[data.AnnotationTask(clip=clip)],
@@ -294,7 +301,8 @@ def test_can_recover_task_simple_annotation(
     # Assert
     assert recovered == annotation_project
     assert (
-        recovered.clip_annotations[0].sound_events[0].sound_event.geometry is not None
+        recovered.clip_annotations[0].sound_events[0].sound_event.geometry
+        is not None
     )
     assert sound_event.geometry is not None
     assert (
@@ -302,7 +310,9 @@ def test_can_recover_task_simple_annotation(
         == sound_event.geometry.type
     )
     assert (
-        recovered.clip_annotations[0].sound_events[0].sound_event.geometry.coordinates
+        recovered.clip_annotations[0]
+        .sound_events[0]
+        .sound_event.geometry.coordinates
         == sound_event.geometry.coordinates
     )
 
@@ -342,8 +352,13 @@ def test_can_recover_task_annotation_with_tags(
 
     # Assert
     assert recovered == annotation_project
-    assert recovered.clip_annotations[0].sound_events[0].tags[0].key == "species"
-    assert recovered.clip_annotations[0].sound_events[0].tags[0].value == "test_species"
+    assert (
+        recovered.clip_annotations[0].sound_events[0].tags[0].key == "species"
+    )
+    assert (
+        recovered.clip_annotations[0].sound_events[0].tags[0].value
+        == "test_species"
+    )
 
 
 def test_can_recover_annotation_creator(
@@ -394,7 +409,9 @@ def test_can_recover_annotation_creation_date(
             data.ClipAnnotation(
                 clip=clip,
                 sound_events=[
-                    data.SoundEventAnnotation(sound_event=sound_event, created_on=date)
+                    data.SoundEventAnnotation(
+                        sound_event=sound_event, created_on=date
+                    )
                 ],
             ),
         ],
@@ -447,8 +464,14 @@ def test_can_recover_annotation_notes(
 
     # Assert
     assert recovered == annotation_project
-    assert recovered.clip_annotations[0].sound_events[0].notes[0].message == "test_note"
-    assert recovered.clip_annotations[0].sound_events[0].notes[0].created_by == user
+    assert (
+        recovered.clip_annotations[0].sound_events[0].notes[0].message
+        == "test_note"
+    )
+    assert (
+        recovered.clip_annotations[0].sound_events[0].notes[0].created_by
+        == user
+    )
 
 
 def test_can_recover_sound_event_features(
@@ -490,11 +513,17 @@ def test_can_recover_sound_event_features(
     # Assert
     assert recovered == annotation_project
     assert (
-        recovered.clip_annotations[0].sound_events[0].sound_event.features[0].name
+        recovered.clip_annotations[0]
+        .sound_events[0]
+        .sound_event.features[0]
+        .name
         == "duration"
     )
     assert (
-        recovered.clip_annotations[0].sound_events[0].sound_event.features[0].value
+        recovered.clip_annotations[0]
+        .sound_events[0]
+        .sound_event.features[0]
+        .value
         == 1.0
     )
@@ -535,7 +564,9 @@ def test_recording_paths_are_stored_as_relative_if_audio_dir_is_provided(
 
 def test_can_parse_nips4plus(tmp_path: Path):
     """Test that NIPS4BPlus annotations can be parsed."""
-    original_path = BASE_DIR / "docs" / "user_guide" / "nips4b_plus_sample.json"
+    original_path = (
+        BASE_DIR / "docs" / "user_guide" / "nips4b_plus_sample.json"
+    )
     path = tmp_path / "test.json"
 
     # Act
diff --git a/tests/test_io/test_aoef/conftest.py b/tests/test_io/test_aoef/conftest.py
index 0073f4b..a2a1421 100644
--- a/tests/test_io/test_aoef/conftest.py
+++ b/tests/test_io/test_aoef/conftest.py
@@ -1,7 +1,6 @@
 from pathlib import Path
 
 import pytest
-
 from soundevent.io.aoef.annotation_project import AnnotationProjectAdapter
 from soundevent.io.aoef.annotation_set import AnnotationSetAdapter
 from soundevent.io.aoef.annotation_task import AnnotationTaskAdapter
diff --git a/tests/test_io/test_aoef/test_api.py b/tests/test_io/test_aoef/test_api.py
index b671410..e6b2117 100644
--- a/tests/test_io/test_aoef/test_api.py
+++ b/tests/test_io/test_aoef/test_api.py
@@ -5,9 +5,7 @@
 from pathlib import Path
 
 import pytest
-
-from soundevent import data
-from soundevent import io
+from soundevent import data, io
 
 
 def test_load_fails_if_file_does_not_exist():
@@ -33,7 +31,8 @@ def test_load_fails_if_file_is_not_a_json_file(tmp_path):
 
 def test_load_fails_if_collection_type_is_not_supported(tmp_path):
     """Test that the load function fails if the collection type is not
-    supported."""
+    supported.
+    """
     # Arrange
     path = tmp_path / "collection_type_not_supported.json"
     path.write_text(
@@ -53,7 +52,8 @@ def test_load_fails_if_collection_type_is_not_supported(tmp_path):
 
 def test_load_fails_if_aoef_version_is_not_supported(tmp_path):
     """Test that the load function fails if the aoef version is not
-    supported."""
+    supported.
+    """
     # Arrange
     path = tmp_path / "aoef_version_not_supported.json"
     path.write_text(
@@ -71,7 +71,9 @@ def test_load_fails_if_aoef_version_is_not_supported(tmp_path):
     io.load(path)
 
 
-def test_save_creates_parent_directories(tmp_path: Path, dataset: data.Dataset):
+def test_save_creates_parent_directories(
+    tmp_path: Path, dataset: data.Dataset
+):
     """Test that the save function creates parent directories."""
     # Arrange
     path = tmp_path / "parent" / "child" / "test.json"
@@ -91,7 +93,8 @@ def test_save_fails_if_trying_to_save_unsupported_collection_type(
     clip_evaluation: data.ClipEvaluation,
 ):
     """Test that the save function fails if trying to save an unsupported
-    collection type."""
+    collection type.
+    """
     # Arrange
     path = tmp_path / "unsupported_collection_type.json"
diff --git a/tests/test_io/test_crowsetta/test_annotation.py b/tests/test_io/test_crowsetta/test_annotation.py
index 07cc7f0..2ed8eb3 100644
--- a/tests/test_io/test_crowsetta/test_annotation.py
+++ b/tests/test_io/test_crowsetta/test_annotation.py
@@ -5,7 +5,6 @@
 
 import crowsetta
 import pytest
-
 import soundevent.io.crowsetta as crowsetta_io
 from soundevent import data
 from soundevent.io.crowsetta.segment import create_crowsetta_segment
@@ -35,7 +34,9 @@ def clip_annotation(recording: data.Recording) -> data.ClipAnnotation:
             data.SoundEventAnnotation(
                 sound_event=data.SoundEvent(
                     recording=recording,
-                    geometry=data.BoundingBox(coordinates=[0.5, 0.5, 1.5, 1.5]),
+                    geometry=data.BoundingBox(
+                        coordinates=[0.5, 0.5, 1.5, 1.5]
+                    ),
                     features=[data.Feature(name="test", value=1.0)],
                 ),
                 tags=[data.Tag(key="animal", value="cat")],
@@ -44,7 +45,9 @@ def clip_annotation(recording: data.Recording) -> data.ClipAnnotation:
             data.SoundEventAnnotation(
                 sound_event=data.SoundEvent(
                     recording=recording,
-                    geometry=data.LineString(coordinates=[[0.5, 0.5], [1.5, 1.5]]),
+                    geometry=data.LineString(
+                        coordinates=[[0.5, 0.5], [1.5, 1.5]]
+                    ),
                     features=[data.Feature(name="test", value=1.0)],
                 ),
                 tags=[data.Tag(key="animal", value="cat")],
diff --git a/tests/test_io/test_crowsetta/test_bbox.py b/tests/test_io/test_crowsetta/test_bbox.py
index 9935b48..f9aba0a 100644
--- a/tests/test_io/test_crowsetta/test_bbox.py
+++ b/tests/test_io/test_crowsetta/test_bbox.py
@@ -2,9 +2,8 @@
 
 import crowsetta
 import pytest
-
-from soundevent import data
 import soundevent.io.crowsetta as crowsetta_io
+from soundevent import data
 
 
 @pytest.fixture
diff --git a/tests/test_io/test_crowsetta/test_import.py b/tests/test_io/test_crowsetta/test_import.py
index 16e688c..8266c6f 100644
--- a/tests/test_io/test_crowsetta/test_import.py
+++ b/tests/test_io/test_crowsetta/test_import.py
@@ -7,9 +7,8 @@
 from pathlib import Path
 
 import crowsetta
-
-from soundevent import data
 import soundevent.io.crowsetta as crowsetta_io
+from soundevent import data
 
 
 @pytest.mark.skipif(
@@ -52,9 +51,9 @@ def test_can_import_all_example_formats(
         from_file_kwargs = {"audio_path": recording.path}
         to_annot_kwargs = {"samplerate": recording.samplerate}
 
-    annotation = scribe.from_file(example.annot_path, **from_file_kwargs).to_annot(
-        **to_annot_kwargs
-    )
+    annotation = scribe.from_file(
+        example.annot_path, **from_file_kwargs
+    ).to_annot(**to_annot_kwargs)
 
     if isinstance(annotation, list):
         annotation = annotation[0]
@@ -62,7 +61,9 @@ def test_can_import_all_example_formats(
     assert isinstance(annotation, crowsetta.Annotation)
 
     if annotation.notated_path is not None:
-        recording = recording.model_copy(update=dict(path=annotation.notated_path))
+        recording = recording.model_copy(
+            update=dict(path=annotation.notated_path)
+        )
 
     clip_annotation = crowsetta_io.annotation_to_clip_annotation(
         annotation,
diff --git a/tests/test_io/test_crowsetta/test_labels.py b/tests/test_io/test_crowsetta/test_labels.py
index 26b2a15..5316aa4 100644
--- a/tests/test_io/test_crowsetta/test_labels.py
+++ b/tests/test_io/test_crowsetta/test_labels.py
@@ -136,7 +136,9 @@ def test_label_to_tags_with_key_mapping():
 
 def test_label_to_tags_with_key_mapping_fallback():
     key_mapping = {"bat": "animal"}
-    tag = crowsetta_io.label_to_tags("dog", key_mapping=key_mapping, fallback="pet")
+    tag = crowsetta_io.label_to_tags(
+        "dog", key_mapping=key_mapping, fallback="pet"
+    )
     assert tag == [data.Tag(key="pet", value="dog")]
diff --git a/tests/test_io/test_crowsetta/test_segments.py b/tests/test_io/test_crowsetta/test_segments.py
index e8bacfe..79ead19 100644
--- a/tests/test_io/test_crowsetta/test_segments.py
+++ b/tests/test_io/test_crowsetta/test_segments.py
@@ -2,9 +2,8 @@
 
 import crowsetta
 import pytest
-
-from soundevent import data
 import soundevent.io.crowsetta as crowsetta_io
+from soundevent import data
 from soundevent.io.crowsetta.segment import (
     create_crowsetta_segment,
 )
@@ -95,7 +94,9 @@ def test_segment_from_annotation(
 def test_segment_from_annotation_fails_if_not_a_time_interval(
     sound_event_annotation: data.SoundEventAnnotation,
 ):
-    sound_event_annotation.sound_event.geometry = data.Point(coordinates=[0.5, 1])
+    sound_event_annotation.sound_event.geometry = data.Point(
+        coordinates=[0.5, 1]
+    )
     with pytest.raises(ValueError):
         crowsetta_io.segment_from_annotation(
             sound_event_annotation,
@@ -106,7 +107,9 @@ def test_segment_from_annotation_fails_if_not_a_time_interval(
 def test_segment_from_annotation_casts_to_segment(
     sound_event_annotation: data.SoundEventAnnotation,
 ):
-    sound_event_annotation.sound_event.geometry = data.Point(coordinates=[0.5, 1])
+    sound_event_annotation.sound_event.geometry = data.Point(
+        coordinates=[0.5, 1]
+    )
     segment = crowsetta_io.segment_from_annotation(
         sound_event_annotation,
         cast_to_segment=True,
diff --git a/tests/test_io/test_crowsetta/test_sequence.py b/tests/test_io/test_crowsetta/test_sequence.py
index d02e5f6..5a652f8 100644
--- a/tests/test_io/test_crowsetta/test_sequence.py
+++ b/tests/test_io/test_crowsetta/test_sequence.py
@@ -5,7 +5,6 @@
 import crowsetta
 import numpy as np
 import pytest
-
 import soundevent.io.crowsetta as crowsetta_io
 from soundevent import data
 from soundevent.io.crowsetta.segment import create_crowsetta_segment
@@ -170,5 +169,7 @@ def test_sequence_to_annotations(
         recording,
     )
     assert len(annotations) == 2
-    assert all(isinstance(ann, data.SoundEventAnnotation) for ann in annotations)
+    assert all(
+        isinstance(ann, data.SoundEventAnnotation) for ann in annotations
+    )
     assert all(ann.sound_event.recording == recording for ann in annotations)
diff --git a/tests/test_io/test_model_runs.py b/tests/test_io/test_model_runs.py
index 8b33b49..d9716fd 100644
--- a/tests/test_io/test_model_runs.py
+++ b/tests/test_io/test_model_runs.py
@@ -138,7 +138,9 @@ def test_can_recover_processed_clip_tags(
     # Assert
     assert model_run == recovered
     assert recovered.clip_predictions[0].tags[0].tag.key == "species"
-    assert recovered.clip_predictions[0].tags[0].tag.value == "Myotis lucifugus"
+    assert (
+        recovered.clip_predictions[0].tags[0].tag.value == "Myotis lucifugus"
+    )
     assert recovered.clip_predictions[0].tags[0].score == 0.9
 
 
@@ -209,7 +211,10 @@ def test_can_recover_simple_predicted_sound_event(
 
     # Assert
     assert recovered.clip_predictions[0].sound_events[0].score == 0.9
-    assert recovered.clip_predictions[0].sound_events[0].sound_event == sound_event
+    assert (
+        recovered.clip_predictions[0].sound_events[0].sound_event
+        == sound_event
+    )
     assert model_run == recovered
 
 
@@ -249,7 +254,10 @@ def test_can_recover_predicted_sound_event_with_predicted_tags(
     recovered = io.load(path, type="model_run")
 
     # Assert
-    assert recovered.clip_predictions[0].sound_events[0].tags[0].tag.key == "species"
+    assert (
+        recovered.clip_predictions[0].sound_events[0].tags[0].tag.key
+        == "species"
+    )
     assert (
         recovered.clip_predictions[0].sound_events[0].tags[0].tag.value
        == "Myotis lucifugus"