Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: Migrate Metrics models to pydantic v2 #1270

Merged
merged 8 commits into from Oct 4, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
38 changes: 19 additions & 19 deletions BALSAMIC/models/metrics.py
@@ -1,8 +1,8 @@
"""QC validation metrics model."""
import logging
from typing import Optional, Any, List
from typing import Optional, Any, List, Annotated

from pydantic.v1 import BaseModel, validator
from pydantic import BaseModel, AfterValidator

from BALSAMIC.constants.metrics import VALID_OPS

Expand Down Expand Up @@ -34,13 +34,26 @@ class Metric(BaseModel):
condition (MetricCondition, required) : Metric validation condition.
"""

header: Optional[str]
header: Optional[str] = None
id: str
input: str
name: str
step: str
value: Any = ...
condition: Optional[MetricCondition] = ...
value: Any
condition: Optional[MetricCondition]


def validate_metric(metric: Metric):
    """Validate a metric against its filtering condition.

    Returns the metric unchanged when it has no condition or the condition
    holds; raises ValueError when the condition check fails.
    """
    if metric.condition:
        # Look up the comparison operator (e.g. "lt", "gt") for this condition
        compare = VALID_OPS[metric.condition.norm]
        if not compare(metric.value, metric.condition.threshold):
            raise ValueError(
                f"QC metric {metric.name}: {metric.value} validation has failed. "
                f"(Condition: {metric.condition.norm} {metric.condition.threshold}, ID: {metric.id})."
            )
    LOG.info(f"QC metric {metric.name}: {metric.value} meets its condition.")
    return metric


class MetricValidation(BaseModel):
Expand All @@ -50,17 +63,4 @@ class MetricValidation(BaseModel):
metrics (List[Metric], required) : Metric model to validate.
"""

metrics: List[Metric]

@validator("metrics", each_item=True)
def validate_metrics(cls, metric):
"""Checks if a metric meets its filtering condition."""
if metric.condition and not VALID_OPS[metric.condition.norm](
metric.value, metric.condition.threshold
):
raise ValueError(
f"QC metric {metric.name}: {metric.value} validation has failed. "
f"(Condition: {metric.condition.norm} {metric.condition.threshold}, ID: {metric.id})."
)
LOG.info(f"QC metric {metric.name}: {metric.value} meets its condition.")
return metric
metrics: List[Annotated[Metric, AfterValidator(validate_metric)]]
1 change: 1 addition & 0 deletions CHANGELOG.rst
Expand Up @@ -49,6 +49,7 @@ Changed:
* Update `reference.json` file to use relative paths https://github.com/Clinical-Genomics/BALSAMIC/pull/1251
* Update pydantic to v2 while maintaining support for v1 models https://github.com/Clinical-Genomics/BALSAMIC/pull/1253
* `PCT_PF_READS_IMPROPER_PAIRS` QC threshold lowered to 5% https://github.com/Clinical-Genomics/BALSAMIC/issues/1265
* Migrate Metrics models to pydantic v2 https://github.com/Clinical-Genomics/BALSAMIC/pull/1270

Fixed:
^^^^^^
Expand Down
14 changes: 5 additions & 9 deletions tests/models/test_metric_models.py
Expand Up @@ -4,11 +4,7 @@

import pytest

from BALSAMIC.models.metrics import (
MetricValidation,
Metric,
MetricCondition,
)
from BALSAMIC.models.metrics import MetricValidation, Metric, MetricCondition


def test_metric_condition():
Expand All @@ -21,7 +17,7 @@ def test_metric_condition():
metric_model: MetricCondition = MetricCondition(**metric_condition)

# THEN assert retrieved values from the created model
assert metric_model.dict().items() == metric_condition.items()
assert metric_model.model_dump() == metric_condition


def test_metric_pass_validation():
Expand All @@ -42,7 +38,7 @@ def test_metric_pass_validation():
metric_model: Metric = Metric(**metrics)

# THEN assert retrieved values from the created model
assert metric_model.dict().items() == metrics.items()
assert metric_model.model_dump() == metrics


def test_metric_fail_validation():
Expand All @@ -54,7 +50,7 @@ def test_metric_fail_validation():
# THEN the model raises an error due to an incomplete input
with pytest.raises(ValueError) as input_exc:
Metric(**invalid_input)
assert "field required" in str(input_exc.value)
assert "Field required" in str(input_exc.value)


def test_metric_validation_pass(qc_extracted_metrics: dict):
Expand All @@ -64,7 +60,7 @@ def test_metric_validation_pass(qc_extracted_metrics: dict):
model: MetricValidation = MetricValidation(metrics=qc_extracted_metrics)

# THEN assert retrieved values from the created model
assert model.dict()["metrics"] == qc_extracted_metrics
assert model.model_dump()["metrics"] == qc_extracted_metrics


def test_metric_validation_fail(qc_extracted_metrics: dict):
Expand Down