Commit

migrated formatting to ruff
mbsantiago committed May 10, 2024
1 parent 9e4df0b commit 3b8b077
Showing 95 changed files with 582 additions and 257 deletions.
4 changes: 0 additions & 4 deletions .github/workflows/test.yml
@@ -23,10 +23,6 @@ jobs:
          python -m pip install --upgrade pip
          python -m pip install pytest hypothesis ruff mypy black html5lib
          python -m pip install ".[all]"
-      - name: Check format is correct
-        run: |
-          black --check src
-          black --check tests
      - name: Make sure types are consistent
        run: mypy --ignore-missing-imports src
      - name: Lint with ruff
2 changes: 1 addition & 1 deletion docs/user_guide/2_loading_audio.py
@@ -6,7 +6,7 @@
[`xarray.DataArray`][xarray.DataArray] objects to hold loaded audio data.
[`xarray.DataArray`][xarray.DataArray] objects are an extension of
[`numpy`][numpy.ndarray] arrays, so there's no need to learn new concepts
if you are already familiar with [`numpy`][numpy.ndarray] arrays.
if you are already familiar with [`numpy`][numpy.ndarray] arrays.
!!! note "Why use `xarray.DataArray` objects?"
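Since the guide passage above is explaining that [`xarray.DataArray`][xarray.DataArray] objects extend numpy arrays, a small generic illustration of that point may help; this is plain xarray and numpy usage, not soundevent's loading API:

```python
import numpy as np
import xarray as xr

# A fake one-second mono recording at 8 kHz, wrapped in a DataArray
# with a labelled "time" dimension.
samplerate = 8_000
audio = xr.DataArray(
    np.random.default_rng(0).normal(size=samplerate),
    dims="time",
    coords={"time": np.arange(samplerate) / samplerate},
)

# Familiar numpy operations work unchanged on the DataArray.
print(audio.shape)                   # (8000,)
print(float(np.abs(audio).max()))    # peak amplitude
print(float(audio[:100].mean()))     # mean of the first 100 samples
```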
4 changes: 3 additions & 1 deletion src/soundevent/arrays/dimensions.py
@@ -493,7 +493,9 @@ def get_dim_step(
return attrs[DimAttrs.step.value]

if not estimate_step:
raise ValueError(f"Step size not found in the '{dim}' dimension attributes.")
raise ValueError(
f"Step size not found in the '{dim}' dimension attributes."
)

return estimate_dim_step(
coord.data,
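The hunk above only re-wraps a long raise inside get_dim_step, which returns the step attribute stored on a dimension or, when estimate_step is true, falls back to estimating it from the coordinate values. A rough standalone sketch of that fallback idea, assuming evenly spaced coordinates (the real estimate_dim_step in soundevent.arrays may differ):

```python
import numpy as np

def estimate_step(coord: np.ndarray) -> float:
    """Estimate a uniform step size from coordinate values (illustrative only)."""
    if coord.size < 2:
        raise ValueError("Need at least two coordinate values to estimate a step.")
    return float(np.mean(np.diff(coord)))

# A time axis sampled every 10 ms yields a step of roughly 0.01.
print(estimate_step(np.arange(0.0, 1.0, 0.01)))
```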
18 changes: 14 additions & 4 deletions src/soundevent/arrays/operations.py
@@ -7,7 +7,11 @@
from numpy.typing import DTypeLike
from xarray.core.types import InterpOptions

from soundevent.arrays.dimensions import create_range_dim, get_dim_range, get_dim_step
from soundevent.arrays.dimensions import (
create_range_dim,
get_dim_range,
get_dim_step,
)

__all__ = [
"center",
@@ -88,7 +92,9 @@ def crop_dim(
stop = current_stop

if start > stop:
raise ValueError(f"Start value {start} must be less than stop value {stop}")
raise ValueError(
f"Start value {start} must be less than stop value {stop}"
)

if start < current_start or stop > current_stop:
raise ValueError(
@@ -174,7 +180,9 @@ def extend_dim(
stop = current_stop

if start > stop:
raise ValueError(f"Start value {start} must be less than stop value {stop}")
raise ValueError(
f"Start value {start} must be less than stop value {stop}"
)

step = get_dim_step(arr, dim)

@@ -304,7 +312,9 @@ def set_value_at_pos(
start, stop = get_dim_range(array, dim)

if coord < start or coord > stop:
raise KeyError(f"Position {coord} is outside the range of dimension {dim}.")
raise KeyError(
f"Position {coord} is outside the range of dimension {dim}."
)

index = array.indexes[dim].get_slice_bound(coord, "right")
indexer[dims[dim]] = index - 1
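The set_value_at_pos hunk keeps the call to array.indexes[dim].get_slice_bound(coord, "right"), a pandas Index method that maps a coordinate value to an integer position. A standalone illustration with made-up values of why the result is then decremented by one:

```python
import pandas as pd

# A coordinate axis at 0.0, 0.1, ..., 0.9 (seconds, say).
index = pd.Index([i / 10 for i in range(10)])

# get_slice_bound(coord, "right") returns the position just past coord,
# so subtracting one selects the last coordinate at or below coord.
coord = 0.34
pos = index.get_slice_bound(coord, "right") - 1
print(pos, index[pos])  # 3 0.3
```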
3 changes: 2 additions & 1 deletion src/soundevent/audio/chunks.py
@@ -111,7 +111,8 @@ def _read_chunk(riff: BinaryIO) -> Optional[Chunk]:

if chunk_id in CHUNKS_WITH_SUBCHUNKS:
chunk.subchunks = {
subchunk.chunk_id: subchunk for subchunk in _get_subchunks(riff, size - 4)
subchunk.chunk_id: subchunk
for subchunk in _get_subchunks(riff, size - 4)
}
else:
riff.seek(size, os.SEEK_CUR)
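For orientation on the chunk parser being re-wrapped above: a RIFF chunk starts with a 4-byte ASCII id and a 4-byte little-endian size, and container chunks carry a 4-byte form type before their subchunks, which is presumably why _get_subchunks receives size - 4. A standalone sketch of reading one header (not the module's actual code):

```python
import struct
from typing import BinaryIO, Optional, Tuple

def read_chunk_header(stream: BinaryIO) -> Optional[Tuple[str, int]]:
    """Read one RIFF chunk header: 4-byte id followed by a 4-byte little-endian size."""
    header = stream.read(8)
    if len(header) < 8:
        return None
    chunk_id, size = struct.unpack("<4sI", header)
    return chunk_id.decode("ascii"), size

# Usage (hypothetical path):
# with open("recording.wav", "rb") as fp:
#     print(read_chunk_header(fp))  # e.g. ("RIFF", <file size minus 8>)
```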
4 changes: 3 additions & 1 deletion src/soundevent/audio/filter.py
@@ -18,7 +18,9 @@ def _get_filter(
order: int = 5,
) -> np.ndarray:
if low_freq is None and high_freq is None:
raise ValueError("At least one of low_freq and high_freq must be specified.")
raise ValueError(
"At least one of low_freq and high_freq must be specified."
)

if low_freq is None:
# Low pass filter
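The comment kept in the hunk ("# Low pass filter") indicates _get_filter builds a low-pass filter when only high_freq is given, and presumably a high-pass or band-pass filter otherwise. A hedged sketch of that pattern using scipy, illustrative rather than the module's exact implementation:

```python
from typing import Optional

import numpy as np
from scipy import signal

def design_filter(
    samplerate: int,
    low_freq: Optional[float] = None,
    high_freq: Optional[float] = None,
    order: int = 5,
) -> np.ndarray:
    if low_freq is None and high_freq is None:
        raise ValueError("At least one of low_freq and high_freq must be specified.")
    if low_freq is None:
        # Only an upper cutoff was given: low-pass filter.
        return signal.butter(order, high_freq, btype="lowpass", output="sos", fs=samplerate)
    if high_freq is None:
        # Only a lower cutoff was given: high-pass filter.
        return signal.butter(order, low_freq, btype="highpass", output="sos", fs=samplerate)
    # Both cutoffs: band-pass filter.
    return signal.butter(order, [low_freq, high_freq], btype="bandpass", output="sos", fs=samplerate)

# e.g. keep 2-8 kHz from audio sampled at 44.1 kHz
sos = design_filter(44_100, low_freq=2_000, high_freq=8_000)
```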
5 changes: 3 additions & 2 deletions src/soundevent/audio/media_info.py
@@ -156,7 +156,9 @@ def get_media_info(path: PathLike) -> MediaInfo:
# chunk is the size of the data subchunk divided by the number
# of channels and the bit depth.
data_chunk = chunk.subchunks["data"]
samples = 8 * data_chunk.size // (fmt_info.channels * fmt_info.bit_depth)
samples = (
8 * data_chunk.size // (fmt_info.channels * fmt_info.bit_depth)
)
duration = samples / fmt_info.samplerate

return MediaInfo(
@@ -224,7 +226,6 @@ def generate_wav_header(
The structure of the WAV header is described in
(WAV PCM soundfile format)[http://soundfile.sapp.org/doc/WaveFormat/].
"""

data_size = samples * channels * bit_depth // 8
byte_rate = samplerate * channels * bit_depth // 8
block_align = channels * bit_depth // 8
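As a sanity check on the samples expression reflowed above, 8 * data_chunk.size // (channels * bit_depth) counts sample frames because the data chunk holds channels * bit_depth / 8 bytes per frame. With made-up numbers:

```python
# Hypothetical 16-bit stereo WAV at 44.1 kHz with a 1,764,000-byte data chunk.
data_size = 1_764_000  # bytes in the "data" subchunk
channels = 2
bit_depth = 16
samplerate = 44_100

samples = 8 * data_size // (channels * bit_depth)  # 441_000 frames
duration = samples / samplerate                    # 10.0 seconds
print(samples, duration)
```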
8 changes: 6 additions & 2 deletions src/soundevent/audio/spectrum.py
@@ -120,7 +120,9 @@ def pcen_core(
raise ValueError(f"eps={eps} must be strictly positive")

if time_constant <= 0:
raise ValueError(f"time_constant={time_constant} must be strictly positive")
raise ValueError(
f"time_constant={time_constant} must be strictly positive"
)

if b is None:
t_frames = time_constant * sr / float(hop_length)
@@ -144,7 +146,9 @@
if max_size == 1:
ref = S
elif S.ndim == 1:
raise ValueError("Max-filtering cannot be applied to 1-dimensional input")
raise ValueError(
"Max-filtering cannot be applied to 1-dimensional input"
)
else:
if max_axis is None:
if S.ndim != 2:
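The pcen_core hunks stop just after t_frames is computed. In librosa's pcen, on which this parameterisation appears to be modelled, the time constant is then converted into the IIR smoothing coefficient b; whether pcen_core uses the identical expression is an assumption here, so the following is illustrative only:

```python
import numpy as np

# Illustrative values.
sr = 22_050          # sample rate
hop_length = 512     # hop between STFT frames
time_constant = 0.4  # seconds

# Number of frames spanned by the time constant (matches the line in the hunk above).
t_frames = time_constant * sr / float(hop_length)

# librosa's conversion from time constant to smoothing coefficient.
b = (np.sqrt(1 + 4 * t_frames**2) - 1) / (2 * t_frames**2)
print(round(t_frames, 2), round(b, 4))
```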
4 changes: 3 additions & 1 deletion src/soundevent/data/annotation_sets.py
@@ -28,4 +28,6 @@ class AnnotationSet(BaseModel):
default_factory=list,
repr=False,
)
created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
created_on: datetime.datetime = Field(
default_factory=datetime.datetime.now
)
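This created_on pattern recurs in most of the data models touched below. Using Field(default_factory=datetime.datetime.now) makes pydantic call now() once per instance instead of freezing a single timestamp when the class is defined. A minimal generic illustration (plain pydantic, not soundevent's models):

```python
import datetime
import time

from pydantic import BaseModel, Field

class Example(BaseModel):
    created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)

first = Example()
time.sleep(0.01)
second = Example()

# Each instance gets its own timestamp because the factory runs per call.
print(second.created_on > first.created_on)  # True
```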
8 changes: 6 additions & 2 deletions src/soundevent/data/annotation_tasks.py
@@ -60,11 +60,15 @@ class StatusBadge(BaseModel):

state: AnnotationState
owner: Optional[User] = None
created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
created_on: datetime.datetime = Field(
default_factory=datetime.datetime.now
)


class AnnotationTask(BaseModel):
uuid: UUID = Field(default_factory=uuid4, repr=False)
clip: Clip
status_badges: List[StatusBadge] = Field(default_factory=list)
created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
created_on: datetime.datetime = Field(
default_factory=datetime.datetime.now
)
8 changes: 6 additions & 2 deletions src/soundevent/data/clip_annotations.py
@@ -43,7 +43,9 @@ class ClipAnnotation(BaseModel):
annotations
A list of Annotation instances representing detailed annotations of
sound events in the clip.
notes
Notes
-----
A list of Note instances representing additional contextual
information or remarks associated with the clip.
"""
@@ -54,4 +56,6 @@
sequences: List[SequenceAnnotation] = Field(default_factory=list)
tags: List[Tag] = Field(default_factory=list)
notes: List[Note] = Field(default_factory=list)
created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
created_on: datetime.datetime = Field(
default_factory=datetime.datetime.now
)
8 changes: 6 additions & 2 deletions src/soundevent/data/clip_evaluations.py
@@ -95,13 +95,17 @@ def _check_matches(self):
}

match_targets = [
match.target.uuid for match in self.matches if match.target is not None
match.target.uuid
for match in self.matches
if match.target is not None
]

match_targets_set = set(match_targets)

match_sources = [
match.source.uuid for match in self.matches if match.source is not None
match.source.uuid
for match in self.matches
if match.source is not None
]

match_sources_set = set(match_sources)
4 changes: 3 additions & 1 deletion src/soundevent/data/evaluations.py
@@ -25,7 +25,9 @@ class Evaluation(BaseModel):
"""Evaluation Class."""

uuid: UUID = Field(default_factory=uuid4, repr=False)
created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
created_on: datetime.datetime = Field(
default_factory=datetime.datetime.now
)
evaluation_task: str
clip_evaluations: Sequence[ClipEvaluation] = Field(default_factory=list)
metrics: Sequence[Feature] = Field(default_factory=list)
36 changes: 26 additions & 10 deletions src/soundevent/data/geometries.py
@@ -252,7 +252,9 @@ def _validate_time_interval(cls, v: List[Time]) -> List[Time]:
after the end time).
"""
if len(v) != 2:
raise ValueError("The time interval must have exactly two time stamps.")
raise ValueError(
"The time interval must have exactly two time stamps."
)

if v[0] > v[1]:
raise ValueError("The start time must be before the end time.")
@@ -323,7 +325,9 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]:
raise ValueError("The time must be positive.")

if frequency < 0 or frequency > MAX_FREQUENCY:
raise ValueError(f"The frequency must be between 0 and {MAX_FREQUENCY}.")
raise ValueError(
f"The frequency must be between 0 and {MAX_FREQUENCY}."
)

return v

@@ -469,7 +473,8 @@ def _validate_coordinates(

if frequency < 0 or frequency > MAX_FREQUENCY:
raise ValueError(
f"The frequency must be between 0 and " f"{MAX_FREQUENCY}."
f"The frequency must be between 0 and "
f"{MAX_FREQUENCY}."
)

return v
@@ -527,7 +532,9 @@ def _validate_coordinates(cls, v: List[float]) -> List[float]:
negative or the frequency is outside the valid range).
"""
if len(v) != 4:
raise ValueError("The bounding box must have exactly four coordinates.")
raise ValueError(
"The bounding box must have exactly four coordinates."
)

start_time, low_freq, end_time, high_freq = v

@@ -551,7 +558,9 @@
raise ValueError("The start time must be before the end time.")

if low_freq > high_freq:
raise ValueError("The start frequency must be before the end frequency.")
raise ValueError(
"The start frequency must be before the end frequency."
)

return v

@@ -762,23 +771,28 @@ def _validate_coordinates(
negative or the frequency is outside the valid range).
"""
if len(v) < 1:
raise ValueError("The multipolygon must have at least one polygon.")
raise ValueError(
"The multipolygon must have at least one polygon."
)

for polygon in v:
if len(polygon) < 1:
raise ValueError("Each polygon must have at least one ring.")

for ring in polygon:
if len(ring) < 3:
raise ValueError("Each ring must have at least three points.")
raise ValueError(
"Each ring must have at least three points."
)

for time, frequency in ring:
if time < 0:
raise ValueError("The time must be positive.")

if frequency < 0 or frequency > MAX_FREQUENCY:
raise ValueError(
f"The frequency must be between 0 and " f"{MAX_FREQUENCY}."
f"The frequency must be between 0 and "
f"{MAX_FREQUENCY}."
)

return v
@@ -894,7 +908,7 @@ def geometry_validate(
if not hasattr(obj, "type"):
raise ValueError(f"Object {obj} does not have a type attribute.")

geom_type = getattr(obj, "type")
geom_type = obj.type

if geom_type not in GEOMETRY_MAPPING:
raise ValueError(f"Object {obj} does not have a geometry valid type.")
@@ -907,4 +921,6 @@
from_attributes=mode == "attributes",
)
except ValidationError as error:
raise ValueError(f"Object {obj} is not a valid {geom_type}.") from error
raise ValueError(
f"Object {obj} is not a valid {geom_type}."
) from error
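To make the bounding-box rules reflowed above concrete: coordinates are ordered [start_time, low_freq, end_time, high_freq], times must be non-negative, frequencies must lie within [0, MAX_FREQUENCY], and the start must not exceed the end on either axis. A standalone restatement of those checks (MAX_FREQUENCY is a placeholder here; the real constant is defined in geometries.py):

```python
from typing import List

MAX_FREQUENCY = 500_000  # placeholder value; see soundevent.data.geometries for the real one

def validate_bbox(v: List[float]) -> List[float]:
    if len(v) != 4:
        raise ValueError("The bounding box must have exactly four coordinates.")
    start_time, low_freq, end_time, high_freq = v
    if start_time < 0 or end_time < 0:
        raise ValueError("The time must be positive.")
    if not (0 <= low_freq <= MAX_FREQUENCY and 0 <= high_freq <= MAX_FREQUENCY):
        raise ValueError(f"The frequency must be between 0 and {MAX_FREQUENCY}.")
    if start_time > end_time:
        raise ValueError("The start time must be before the end time.")
    if low_freq > high_freq:
        raise ValueError("The start frequency must be before the end frequency.")
    return v

# A sound event between 0.1 s and 0.3 s spanning 2-8 kHz.
print(validate_bbox([0.1, 2_000.0, 0.3, 8_000.0]))
```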
4 changes: 3 additions & 1 deletion src/soundevent/data/notes.py
@@ -95,7 +95,9 @@ class Note(BaseModel):
message: str
created_by: Optional[User] = None
is_issue: bool = False
created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
created_on: datetime.datetime = Field(
default_factory=datetime.datetime.now
)

def __hash__(self):
"""Hash the Note object."""
4 changes: 3 additions & 1 deletion src/soundevent/data/prediction_sets.py
@@ -73,4 +73,6 @@ class PredictionSet(BaseModel):

uuid: UUID = Field(default_factory=uuid4)
clip_predictions: List[ClipPrediction] = Field(default_factory=list)
created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
created_on: datetime.datetime = Field(
default_factory=datetime.datetime.now
)
4 changes: 3 additions & 1 deletion src/soundevent/data/recording_sets.py
@@ -12,4 +12,6 @@
class RecordingSet(BaseModel):
uuid: UUID = Field(default_factory=uuid4)
recordings: List[Recording] = Field(default_factory=list, repr=False)
created_on: datetime.datetime = Field(default_factory=datetime.datetime.now)
created_on: datetime.datetime = Field(
default_factory=datetime.datetime.now
)
5 changes: 4 additions & 1 deletion src/soundevent/data/recordings.py
@@ -193,7 +193,10 @@ def from_file(
Recording
The recording object.
"""
from soundevent.audio.media_info import compute_md5_checksum, get_media_info
from soundevent.audio.media_info import (
compute_md5_checksum,
get_media_info,
)

media_info = get_media_info(path)

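A hedged usage sketch for the classmethod whose imports are re-wrapped above: from_file builds a Recording by reading the file's media info (and, given the compute_md5_checksum import, presumably its checksum). The path and the printed attribute names are assumptions for illustration:

```python
from soundevent import data

# Hypothetical path; any readable audio file supported by soundevent would do.
recording = data.Recording.from_file("recordings/site_a/20240510_0600.wav")

# Attribute names assumed from the media-info fields used elsewhere in this commit.
print(recording.duration, recording.samplerate, recording.channels)
```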
4 changes: 3 additions & 1 deletion src/soundevent/data/sequence_annotations.py
@@ -25,7 +25,9 @@ class SequenceAnnotation(BaseModel):
A unique identifier for the annotation.
sequence
The sequence being annotated.
notes
Notes
-----
A list of notes associated with the sequence.
tags
The tags attached to the sequence providing semantic information.
4 changes: 3 additions & 1 deletion src/soundevent/data/sound_event_annotations.py
@@ -83,7 +83,9 @@ class SoundEventAnnotation(BaseModel):
being annotated. Sound events define distinct audio occurrences, such as bird
calls or animal vocalizations, and are essential for categorizing the content
of the audio data.
notes
Notes
-----
A list of `Note` instances representing additional contextual information or
remarks associated with the annotation. Notes can provide insights into specific
characteristics of the sound event, aiding in the comprehensive understanding