Test and docstrings fixes. (#625)
czaloom committed Jun 14, 2024
1 parent 2a94e0a commit 89496db
Showing 5 changed files with 63 additions and 62 deletions.
25 changes: 13 additions & 12 deletions client/valor/schemas/symbolic/collections.py
@@ -279,37 +279,38 @@ class Annotation(StaticCollection):
... )
Object-Detection Box
- >>> annotation = Annotation.create(
+ >>> annotation = Annotation(
... labels=[Label(key="k1", value="v1")],
- ... bounding_box=box2,
+ ... bounding_box=box2,
... )
Object-Detection Polygon
- >>> annotation = Annotation.create(
+ >>> annotation = Annotation(
... labels=[Label(key="k1", value="v1")],
... polygon=BoundingPolygon(...),
... )
- Object-Detection Raster
- >>> annotation = Annotation.create(
+ Raster
+ >>> annotation = Annotation(
... labels=[Label(key="k1", value="v1")],
... raster=Raster(...),
... is_instance=True
... )
- Semantic-Segmentation Raster
- >>> annotation = Annotation.create(
+ Object-Detection with all supported Geometries defined.
+ >>> Annotation(
... labels=[Label(key="k1", value="v1")],
+ ... bounding_box=Box(...),
+ ... polygon=BoundingPolygon(...),
... raster=Raster(...),
- ... is_instance=False # or None
+ ... is_instance=True,
... )
- Defining all supported annotation types is allowed!
- >>> Annotation.create(
+ Semantic-Segmentation Raster
+ >>> annotation = Annotation(
... labels=[Label(key="k1", value="v1")],
- ... bounding_box=Box(...),
- ... polygon=BoundingPolygon(...),
... raster=Raster(...),
+ ... is_instance=False # or None
... )
... )
"""

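To make the docstring change concrete, here is a minimal sketch of the before/after usage, assuming the valor client API shown above; the top-level import path for Annotation and the geometry value are placeholders not confirmed by this diff.

# Hedged sketch: the docstring examples now construct Annotation directly
# instead of going through the Annotation.create() factory.
from valor import Annotation, Label  # assumed import path, for illustration only

box = ...  # placeholder geometry, mirroring the Box(...) / Raster(...) stubs above

# Old docstring form (removed in this commit):
#     annotation = Annotation.create(labels=[...], bounding_box=box)
# New docstring form (added in this commit):
annotation = Annotation(
    labels=[Label(key="k1", value="v1")],
    bounding_box=box,
)
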
28 changes: 14 additions & 14 deletions integration_tests/client/metrics/test_classification.py
@@ -17,7 +17,7 @@
Model,
Prediction,
)
- from valor.enums import EvaluationStatus
+ from valor.enums import EvaluationStatus, MetricType
from valor.exceptions import ClientException, EvaluationRequestError


@@ -196,12 +196,12 @@ def test_evaluate_image_clf(
# check that metrics arg works correctly
selected_metrics = random.sample(
[
"Accuracy",
"ROCAUC",
"Precision",
"F1",
"Recall",
"PrecisionRecallCurve",
MetricType.Accuracy,
MetricType.ROCAUC,
MetricType.Precision,
MetricType.F1,
MetricType.Recall,
MetricType.PrecisionRecallCurve,
],
2,
)
@@ -1023,13 +1023,13 @@ def test_evaluate_classification_with_label_maps(
label_map=label_mapping,
pr_curve_max_examples=3,
metrics_to_return=[
"Precision",
"Recall",
"F1",
"Accuracy",
"ROCAUC",
"PrecisionRecallCurve",
"DetailedPrecisionRecallCurve",
MetricType.Precision,
MetricType.Recall,
MetricType.F1,
MetricType.Accuracy,
MetricType.ROCAUC,
MetricType.PrecisionRecallCurve,
MetricType.DetailedPrecisionRecallCurve,
],
)
assert eval_job.id
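For context, a hedged sketch of how the enum-based metric selection above is used, assuming a populated dataset/model pair and a Model.evaluate_classification method matching the keyword arguments in these hunks (the full call is not visible in this diff):

from valor import Dataset, Model
from valor.enums import EvaluationStatus, MetricType

dataset: Dataset = ...  # placeholder: an existing, finalized dataset
model: Model = ...      # placeholder: a model with predictions on that dataset

# MetricType members replace the bare strings ("Accuracy", "ROCAUC", ...) that
# these tests passed before this commit.
eval_job = model.evaluate_classification(  # assumed method name
    dataset,
    metrics_to_return=[
        MetricType.Accuracy,
        MetricType.ROCAUC,
        MetricType.PrecisionRecallCurve,
    ],
    pr_curve_max_examples=3,
)
assert eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE
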
52 changes: 26 additions & 26 deletions integration_tests/client/metrics/test_detection.py
@@ -22,7 +22,7 @@
Model,
Prediction,
)
- from valor.enums import AnnotationType, EvaluationStatus, TaskType
+ from valor.enums import AnnotationType, EvaluationStatus, MetricType, TaskType
from valor.exceptions import ClientException
from valor.schemas import Box
from valor_api.backend import models
@@ -461,13 +461,13 @@ def test_evaluate_detection(
# check that metrics arg works correctly
selected_metrics = random.sample(
[
"AP",
"AR",
"mAP",
"APAveragedOverIOUs",
"mAR",
"mAPAveragedOverIOUs",
"PrecisionRecallCurve",
MetricType.AP,
MetricType.AR,
MetricType.mAP,
MetricType.APAveragedOverIOUs,
MetricType.mAR,
MetricType.mAPAveragedOverIOUs,
MetricType.PrecisionRecallCurve,
],
2,
)
@@ -918,7 +918,7 @@ def test_get_evaluations(
with pytest.raises(ClientException):
both_evaluations_from_evaluation_ids_sorted = client.get_evaluations(
evaluation_ids=[eval_job.id, eval_job2.id],
- metrics_to_sort_by=["AP"],
+ metrics_to_sort_by=[MetricType.AP], # type: ignore - testing
)


@@ -1168,14 +1168,14 @@ def test_evaluate_detection_with_label_maps(
iou_thresholds_to_return=[0.1, 0.6],
pr_curve_max_examples=1,
metrics_to_return=[
"AP",
"AR",
"mAP",
"APAveragedOverIOUs",
"mAR",
"mAPAveragedOverIOUs",
"PrecisionRecallCurve",
"DetailedPrecisionRecallCurve",
MetricType.AP,
MetricType.AR,
MetricType.mAP,
MetricType.APAveragedOverIOUs,
MetricType.mAR,
MetricType.mAPAveragedOverIOUs,
MetricType.PrecisionRecallCurve,
MetricType.DetailedPrecisionRecallCurve,
],
)

@@ -1888,13 +1888,13 @@ def test_evaluate_detection_with_label_maps(
label_map=label_mapping,
recall_score_threshold=0.8,
metrics_to_return=[
"AP",
"AR",
"mAP",
"APAveragedOverIOUs",
"mAR",
"mAPAveragedOverIOUs",
"PrecisionRecallCurve",
MetricType.AP,
MetricType.AR,
MetricType.mAP,
MetricType.APAveragedOverIOUs,
MetricType.mAR,
MetricType.mAPAveragedOverIOUs,
MetricType.PrecisionRecallCurve,
],
)

@@ -2567,7 +2567,7 @@ def test_detailed_precision_recall_curve(
dataset,
pr_curve_max_examples=1,
metrics_to_return=[
"DetailedPrecisionRecallCurve",
MetricType.DetailedPrecisionRecallCurve,
],
)
eval_job.wait_for_completion(timeout=30)
@@ -2692,7 +2692,7 @@ def test_detailed_precision_recall_curve(
dataset,
pr_curve_max_examples=1,
metrics_to_return=[
"DetailedPrecisionRecallCurve",
MetricType.DetailedPrecisionRecallCurve,
],
pr_curve_iou_threshold=0.45, # actual IOU is .481
)
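A similar hedged sketch for the detection tests, assuming a Model.evaluate_detection method that accepts the keyword arguments visible in the hunks above; the dataset and model are placeholders for objects the test fixtures create:

from valor import Dataset, Model
from valor.enums import EvaluationStatus, MetricType

dataset: Dataset = ...  # placeholder dataset with ground-truth detections
model: Model = ...      # placeholder model with predicted detections

eval_job = model.evaluate_detection(  # assumed method name
    dataset,
    iou_thresholds_to_return=[0.1, 0.6],  # values taken from the hunk above
    pr_curve_max_examples=1,
    metrics_to_return=[
        MetricType.AP,
        MetricType.mAP,
        MetricType.DetailedPrecisionRecallCurve,
    ],
)
assert eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE
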
10 changes: 5 additions & 5 deletions integration_tests/client/metrics/test_evaluations.py
@@ -11,7 +11,7 @@
Model,
Prediction,
)
- from valor.enums import EvaluationStatus
+ from valor.enums import EvaluationStatus, MetricType
from valor.exceptions import ClientException
from valor_api import crud, enums, schemas
from valor_api.backend import core
@@ -440,10 +440,10 @@ def test_get_sorted_evaluations(
evaluations = client.get_evaluations(
datasets=["clf_dataset", "seg_dataset", "det_dataset"],
metrics_to_sort_by=[
"mAPAveragedOverIOUs",
"Accuracy",
"mIOU",
],
MetricType.mAPAveragedOverIOUs,
MetricType.Accuracy,
MetricType.mIOU,
], # type: ignore - testing
)

with pytest.raises(ClientException):
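A sketch of the metric-sorted listing these tests exercise, using only the call shape shown in test_get_sorted_evaluations above; the client is a placeholder, and the dataset names come from the test itself:

from valor import Client
from valor.enums import MetricType

client: Client = ...  # placeholder for a connected client

# The tests add a `# type: ignore` when passing MetricType members here.
evaluations = client.get_evaluations(
    datasets=["clf_dataset", "seg_dataset", "det_dataset"],
    metrics_to_sort_by=[
        MetricType.mAPAveragedOverIOUs,
        MetricType.Accuracy,
        MetricType.mIOU,
    ],  # type: ignore
)
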
10 changes: 5 additions & 5 deletions integration_tests/client/metrics/test_segmentation.py
@@ -5,7 +5,7 @@
import random

from valor import Client, Dataset, Datum, GroundTruth, Label, Model, Prediction
- from valor.enums import EvaluationStatus
+ from valor.enums import EvaluationStatus, MetricType


def test_evaluate_segmentation(
@@ -55,7 +55,7 @@ def test_evaluate_segmentation(

# check that metrics arg works correctly
selected_metrics = random.sample(
["IOU", "mIOU"],
[MetricType.IOU, MetricType.mIOU],
1,
)
eval_job_random_metrics = model.evaluate_segmentation(
@@ -213,7 +213,7 @@ def test_evaluate_segmentation_with_label_maps(
# test only passing in one metric or the other
eval_job = model.evaluate_segmentation(
dataset,
- metrics_to_return=["IOU"],
+ metrics_to_return=[MetricType.IOU],
label_map={
Label(key=f"k{i}", value=f"v{i}"): Label(key="foo", value="bar")
for i in range(1, 4)
@@ -225,12 +225,12 @@

eval_job = model.evaluate_segmentation(
dataset,
- metrics_to_return=["mIOU"],
+ metrics_to_return=[MetricType.mIOU],
label_map={
Label(key=f"k{i}", value=f"v{i}"): Label(key="foo", value="bar")
for i in range(1, 4)
},
)

assert eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE
- assert set([m["type"] for m in eval_job.metrics]) == set(["mIOU"])
+ assert set([m["type"] for m in eval_job.metrics]) == set([MetricType.mIOU])
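Finally, a hedged sketch of the segmentation flow above, with placeholder dataset/model objects; the last assertion mirrors the test and suggests MetricType members compare equal to the metric-type strings returned by the API:

from valor import Dataset, Model
from valor.enums import EvaluationStatus, MetricType

dataset: Dataset = ...  # placeholder segmentation dataset
model: Model = ...      # placeholder model

eval_job = model.evaluate_segmentation(
    dataset,
    metrics_to_return=[MetricType.mIOU],
)
assert eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE
# As in the test above, the returned metric types match the requested enum member.
assert {m["type"] for m in eval_job.metrics} == {MetricType.mIOU}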
