diff --git a/api/tests/functional-tests/backend/metrics/test_detection.py b/api/tests/functional-tests/backend/metrics/test_detection.py index c83efa3a9..f19d42e5d 100644 --- a/api/tests/functional-tests/backend/metrics/test_detection.py +++ b/api/tests/functional-tests/backend/metrics/test_detection.py @@ -571,8 +571,8 @@ def _metric_to_dict(m) -> dict: {"iou": 0.75, "value": 1.0, "label": {"key": "class", "value": "4"}}, ] expected_map_metrics = [ - {"iou": 0.5, "value": 0.859}, - {"iou": 0.75, "value": 0.761}, + {"iou": 0.5, "value": 0.859, "label_key": "class"}, + {"iou": 0.75, "value": 0.761, "label_key": "class"}, ] expected_ap_metrics_ave_over_ious = [ { @@ -602,7 +602,7 @@ def _metric_to_dict(m) -> dict: }, ] expected_map_metrics_ave_over_ious = [ - {"ious": iou_thresholds, "value": 0.637} + {"ious": iou_thresholds, "value": 0.637, "label_key": "class"} ] expected_ar_metrics = [ { @@ -637,10 +637,7 @@ def _metric_to_dict(m) -> dict: }, ] expected_mar_metrics = [ - { - "ious": iou_thresholds, - "value": 0.652, - }, + {"ious": iou_thresholds, "value": 0.652, "label_key": "class"}, ] for metric_type, actual_metrics, expected_metrics in [ @@ -809,13 +806,10 @@ def test__compute_detection_metrics_with_rasters( "label": {"key": "class", "value": "label3"}, }, # mAP METRICS - {"iou": 0.5, "value": 0.667}, - {"iou": 0.75, "value": 0.667}, + {"iou": 0.5, "value": 0.667, "label_key": "class"}, + {"iou": 0.75, "value": 0.667, "label_key": "class"}, # mAP METRICS AVERAGED OVER IOUS - { - "ious": iou_thresholds, - "value": 0.667, - }, + {"ious": iou_thresholds, "value": 0.667, "label_key": "class"}, # AR METRICS { "ious": iou_thresholds, @@ -833,10 +827,7 @@ def test__compute_detection_metrics_with_rasters( "label": {"key": "class", "value": "label3"}, }, # mAR METRICS - { - "ious": iou_thresholds, - "value": 0.667, - }, + {"ious": iou_thresholds, "value": 0.667, "label_key": "class"}, ] non_pr_metrics = metrics[:-1] diff --git a/api/tests/functional-tests/backend/metrics/test_segmentation.py b/api/tests/functional-tests/backend/metrics/test_segmentation.py index f18c08740..891d560f7 100644 --- a/api/tests/functional-tests/backend/metrics/test_segmentation.py +++ b/api/tests/functional-tests/backend/metrics/test_segmentation.py @@ -416,13 +416,13 @@ def test__compute_segmentation_metrics( prediction_filter=prediction_filter, groundtruth_filter=groundtruth_filter, ) - # should have five metrics (one IOU for each of the four labels, and one mIOU) - assert len(metrics) == 5 - for metric in metrics[:-1]: + # should have seven metrics (one IOU for each of the four labels, and three mIOUs) + assert len(metrics) == 7 + for metric in metrics[:-3]: assert isinstance(metric, schemas.IOUMetric) assert metric.value < 1.0 - assert isinstance(metrics[-1], schemas.mIOUMetric) - assert metrics[-1].value < 1.0 + assert all([isinstance(m, schemas.mIOUMetric) for m in metrics[-3:]]) + assert all([m.value < 1.0 for m in metrics[-3:]]) def test_compute_semantic_segmentation_metrics( @@ -477,11 +477,18 @@ def test_compute_semantic_segmentation_metrics( schemas.Label(key="k1", value="v1", score=None): 0.33, } + expected_mIOU_metrics = {"k1": (0.33 + 0) / 2, "k2": 0, "k3": 0} + assert metrics for metric in metrics: assert isinstance(metric.value, float) if metric.type == "mIOU": - assert (metric.value - 0.084) <= 0.01 + assert metric.parameters + assert metric.parameters["label_key"] + assert ( + metric.value + - expected_mIOU_metrics[metric.parameters["label_key"]] + ) <= 0.01 else: # the IOU value for (k1, v1) is
bound between .327 and .336 assert metric.label diff --git a/api/tests/unit-tests/schemas/test_metrics.py b/api/tests/unit-tests/schemas/test_metrics.py index 7e8d9b941..2613f4284 100644 --- a/api/tests/unit-tests/schemas/test_metrics.py +++ b/api/tests/unit-tests/schemas/test_metrics.py @@ -84,16 +84,19 @@ def test_APMetricAveragedOverIOUs(): def test_mAPMetric(): - map_metric = schemas.mAPMetric(iou=0.2, value=0.5) + map_metric = schemas.mAPMetric(iou=0.2, value=0.5, label_key="key") with pytest.raises(ValidationError): - schemas.mAPMetric(iou=None, value=0.5) # type: ignore - purposefully throwing error + schemas.mAPMetric(iou=None, value=0.5, label_key="key") # type: ignore - purposefully throwing error with pytest.raises(ValidationError): - schemas.mAPMetric(iou=0.1, value=None) # type: ignore - purposefully throwing error + schemas.mAPMetric(iou=0.1, value=None, label_key="key") # type: ignore - purposefully throwing error with pytest.raises(ValidationError): - schemas.mAPMetric(iou=0.1, value="value") # type: ignore - purposefully throwing error + schemas.mAPMetric(iou=0.1, value="value", label_key="key") # type: ignore - purposefully throwing error + + with pytest.raises(ValidationError): + schemas.mAPMetric(iou=0.1, value=0.5, label_key=None) # type: ignore - purposefully throwing error assert all( [ @@ -105,17 +108,22 @@ def test_mAPMetric(): def test_mAPMetricAveragedOverIOUs(): map_averaged_metric = schemas.mAPMetricAveragedOverIOUs( - ious=set([0.1, 0.2]), value=0.5 + ious=set([0.1, 0.2]), value=0.5, label_key="key" ) with pytest.raises(ValidationError): - schemas.mAPMetricAveragedOverIOUs(ious=None, value=0.5) # type: ignore - purposefully throwing error + schemas.mAPMetricAveragedOverIOUs(ious=None, value=0.5, label_key="key") # type: ignore - purposefully throwing error + + with pytest.raises(ValidationError): + schemas.mAPMetricAveragedOverIOUs(ious=set([0.1, 0.2]), value=None, label_key="key") # type: ignore - purposefully throwing error with pytest.raises(ValidationError): - schemas.mAPMetricAveragedOverIOUs(ious=set([0.1, 0.2]), value=None) # type: ignore - purposefully throwing error + schemas.mAPMetricAveragedOverIOUs(ious=set([0.1, 0.2]), value="value", label_key="key") # type: ignore - purposefully throwing error with pytest.raises(ValidationError): - schemas.mAPMetricAveragedOverIOUs(ious=set([0.1, 0.2]), value="value") # type: ignore - purposefully throwing error + map_averaged_metric = schemas.mAPMetricAveragedOverIOUs( + ious=set([0.1, 0.2]), value=0.5, label_key=None # type: ignore - purposefully throwing error + ) assert all( [ @@ -357,17 +365,20 @@ def test_IOUMetric(): def test_mIOUMetric(): - iou_metric = schemas.mIOUMetric(value=0.2) + iou_metric = schemas.mIOUMetric(value=0.2, label_key="key") + + with pytest.raises(ValidationError): + schemas.mIOUMetric(value=None, label_key="key") # type: ignore - purposefully throwing error with pytest.raises(ValidationError): - schemas.mIOUMetric(value=None) # type: ignore - purposefully throwing error + schemas.mIOUMetric(value="not a value", label_key="key") # type: ignore - purposefully throwing error with pytest.raises(ValidationError): schemas.mIOUMetric(value="not a value") # type: ignore - purposefully throwing error assert all( [ - key in ["value", "type", "evaluation_id"] + key in ["value", "type", "evaluation_id", "parameters"] for key in iou_metric.db_mapping(evaluation_id=1) ] ) diff --git a/api/valor_api/backend/metrics/detection.py b/api/valor_api/backend/metrics/detection.py index 3d7ea9836..c27a5cdd9 
100644 --- a/api/valor_api/backend/metrics/detection.py +++ b/api/valor_api/backend/metrics/detection.py @@ -785,18 +785,22 @@ def _compute_mean_ar_metrics( if len(ar_metrics) == 0: return [] - ious_to_values = defaultdict(list) + value_dict = defaultdict(lambda: defaultdict(list)) for metric in ar_metrics: - ious_to_values[frozenset(metric.ious)].append(metric.value) + value_dict[metric.label.key][frozenset(metric.ious)].append( + metric.value + ) mean_metrics = [] - for ious in ious_to_values.keys(): - mean_metrics.append( - schemas.mARMetric( - ious=ious, - value=_average_ignore_minus_one(ious_to_values[ious]), + for label_key, nested_dict in value_dict.items(): + for ious, values in nested_dict.items(): + mean_metrics.append( + schemas.mARMetric( + ious=ious, + value=_average_ignore_minus_one(values), + label_key=label_key, + ) ) - ) return mean_metrics @@ -810,29 +814,37 @@ def _compute_mean_detection_metrics_from_aps( return [] # dictionary for mapping an iou threshold to set of APs - vals = {} + vals = defaultdict(lambda: defaultdict(list)) for ap in ap_scores: if hasattr(ap, "iou"): iou = ap.iou # type: ignore - pyright doesn't consider hasattr checks else: iou = frozenset(ap.ious) # type: ignore - pyright doesn't consider hasattr checks - if iou not in vals: - vals[iou] = [] - vals[iou].append(ap.value) + vals[ap.label.key][iou].append(ap.value) # get mAP metrics at the individual IOUs - mean_detection_metrics = [ - ( - schemas.mAPMetric( - iou=iou, value=_average_ignore_minus_one(vals[iou]) - ) - if isinstance(iou, float) - else schemas.mAPMetricAveragedOverIOUs( - ious=iou, value=_average_ignore_minus_one(vals[iou]) - ) - ) - for iou in vals.keys() - ] + mean_detection_metrics = [] + + for label_key, nested_dict in vals.items(): + for iou, values in nested_dict.items(): + if isinstance(iou, float): + mean_detection_metrics.append( + schemas.mAPMetric( + iou=iou, + value=_average_ignore_minus_one(values), + label_key=label_key, + ) + ) + else: + mean_detection_metrics.append( + schemas.mAPMetricAveragedOverIOUs( + ious=iou, + value=_average_ignore_minus_one( + values, + ), + label_key=label_key, + ) + ) return mean_detection_metrics diff --git a/api/valor_api/backend/metrics/segmentation.py b/api/valor_api/backend/metrics/segmentation.py index 93077213d..cdcba3534 100644 --- a/api/valor_api/backend/metrics/segmentation.py +++ b/api/valor_api/backend/metrics/segmentation.py @@ -1,3 +1,5 @@ +from collections import defaultdict + from geoalchemy2.functions import ST_Count, ST_MapAlgebra from sqlalchemy.orm import Session, aliased from sqlalchemy.sql import Select, func, select @@ -178,6 +180,7 @@ def _compute_segmentation_metrics( ) ret = [] + ious_per_grouper_key = defaultdict(list) for grouper_id, label_ids in grouper_mappings[ "grouper_id_to_label_ids_mapping" ].items(): @@ -185,14 +188,14 @@ def _compute_segmentation_metrics( groundtruth_filter.label_ids = [label_id for label_id in label_ids] prediction_filter.label_ids = [label_id for label_id in label_ids] - _compute_iou_score = _compute_iou( + computed_iou_score = _compute_iou( db, groundtruth_filter, prediction_filter, ) # only add an IOUMetric if the label ids associated with the grouper id have at least one gt raster - if _compute_iou_score is None: + if computed_iou_score is None: continue grouper_label = grouper_mappings[ @@ -202,19 +205,24 @@ def _compute_segmentation_metrics( ret.append( IOUMetric( label=grouper_label, - value=_compute_iou_score, + value=computed_iou_score, ) ) - ret.append( + 
ious_per_grouper_key[grouper_label.key].append(computed_iou_score) + + # aggregate IOUs by key + ret += [ mIOUMetric( value=( - sum([metric.value for metric in ret]) / len(ret) - if len(ret) != 0 + sum(iou_values) / len(iou_values) + if len(iou_values) != 0 else -1 - ) + ), + label_key=grouper_key, ) - ) + for grouper_key, iou_values in ious_per_grouper_key.items() + ] return ret diff --git a/api/valor_api/schemas/metrics.py b/api/valor_api/schemas/metrics.py index 472739b91..7dfde7d78 100644 --- a/api/valor_api/schemas/metrics.py +++ b/api/valor_api/schemas/metrics.py @@ -162,10 +162,13 @@ class mARMetric(BaseModel): A set of intersect-over-union (IOU) values. value : float The value of the metric. + label_key : str + The label key associated with the metric. """ ious: set[float] value: float + label_key: str def db_mapping(self, evaluation_id: int) -> dict: """ @@ -186,6 +189,7 @@ def db_mapping(self, evaluation_id: int) -> dict: "evaluation_id": evaluation_id, "parameters": { "ious": list(self.ious), + "label_key": self.label_key, }, } @@ -200,10 +204,13 @@ class mAPMetric(BaseModel): The intersect-over-union (IOU) value. value : float The value of the metric. + label_key : str + The label key associated with the metric. """ iou: float value: float + label_key: str def db_mapping(self, evaluation_id: int) -> dict: """ @@ -222,7 +229,10 @@ def db_mapping(self, evaluation_id: int) -> dict: "value": self.value, "type": "mAP", "evaluation_id": evaluation_id, - "parameters": {"iou": self.iou}, + "parameters": { + "iou": self.iou, + "label_key": self.label_key, + }, } @@ -236,10 +246,13 @@ class mAPMetricAveragedOverIOUs(BaseModel): A set of intersect-over-union (IOU) values. value : float The value of the metric. + label_key : str + The label key associated with the metric. """ ious: set[float] value: float + label_key: str def db_mapping(self, evaluation_id: int) -> dict: """ @@ -258,7 +271,10 @@ def db_mapping(self, evaluation_id: int) -> dict: "value": self.value, "type": "mAPAveragedOverIOUs", "evaluation_id": evaluation_id, - "parameters": {"ious": list(self.ious)}, + "parameters": { + "ious": list(self.ious), + "label_key": self.label_key, + }, } @@ -373,7 +389,7 @@ class AccuracyMetric(BaseModel): Attributes ---------- label_key : str - A label for the metric. + The label key associated with the metric. value : float The metric value. """ @@ -409,7 +425,7 @@ class PrecisionRecallCurve(BaseModel): Attributes ---------- label_key: str - A label for the metric. + The label key associated with the metric. value: dict A nested dictionary where the first key is the class label, the second key is the confidence threshold (e.g., 0.05), the third key is the metric name (e.g., "precision"), and the final key is either the value itself (for precision, recall, etc.) or a list of tuples containing data for each observation. pr_curve_iou_threshold: float, optional @@ -528,7 +544,7 @@ class ROCAUCMetric(BaseModel): Attributes ---------- label_key : str - A label for the metric. + The label key associated with the metric. value : float The metric value. """ @@ -601,9 +617,12 @@ class mIOUMetric(BaseModel): ---------- value : float The metric value. + label_key : str + The label key associated with the metric. 
""" value: float + label_key: str def db_mapping(self, evaluation_id: int) -> dict: """ @@ -622,4 +641,5 @@ def db_mapping(self, evaluation_id: int) -> dict: "value": self.value, "type": "mIOU", "evaluation_id": evaluation_id, + "parameters": {"label_key": self.label_key}, } diff --git a/docs/metrics.md b/docs/metrics.md index 57817ceae..8bcbef402 100644 --- a/docs/metrics.md +++ b/docs/metrics.md @@ -19,10 +19,10 @@ If we're missing an important metric for your particular use case, please [write | :- | :- | :- | | Average Precision (AP) | The weighted mean of precisions achieved at several different recall thresholds for a single Intersection over Union (IOU), grouped by class. | See [AP methods](#average-precision-ap). | | AP Averaged Over IOUs | The average of several AP metrics across IOU thresholds, grouped by class labels. | $\dfrac{1}{\text{number of thresholds}} \sum\limits_{iou \in thresholds} AP_{iou}$ | -| Mean Average Precision (mAP) | The average of several AP metrics across class labels, grouped by IOU thresholds. | $\dfrac{1}{\text{number of classes}} \sum\limits_{c \in classes} AP_{c}$ | -| mAP Averaged Over IOUs | The average of several mAP metrics across class labels. | $\dfrac{1}{\text{number of thresholds}} \sum\limits_{iou \in thresholds} mAP_{iou}$ | +| Mean Average Precision (mAP) | The average of several AP metrics, grouped by label keys and IOU thresholds. | $\dfrac{1}{\text{number of labels}} \sum\limits_{label \in labels} AP_{c}$ | +| mAP Averaged Over IOUs | The average of several mAP metrics grouped by label keys. | $\dfrac{1}{\text{number of thresholds}} \sum\limits_{iou \in thresholds} mAP_{iou}$ | | Average Recall (AR) | The average of several recall metrics across IOU thresholds, grouped by class labels. | See [AR methods](#average-recall-ar). | -| Mean Average Recall (mAR) | The average of several AR metrics across class labels. | $\dfrac{1}{\text{number of classes}} \sum\limits_{class \in classes} AR_{class}$ | +| Mean Average Recall (mAR) | The average of several AR metrics, grouped by label keys. | $\dfrac{1}{\text{number of labels}} \sum\limits_{label \in labels} AR_{class}$ | | Precision-Recall Curves | Outputs a nested dictionary containing the true positives, false positives, true negatives, false negatives, precision, recall, and F1 score for each (label key, label value, confidence threshold) combination. Computing this output requires setting the `compute_pr_curves` argument to `True` at evaluation time. These curves are calculated using a default IOU threshold of 0.5; you can set your own threshold by passing a float between 0 and 1 to the `pr_curve_iou_threshold` parameter at evaluation time. | See [precision-recall curve methods](#precision-recall-curves)| @@ -33,7 +33,7 @@ If we're missing an important metric for your particular use case, please [write | Name | Description | Equation | | :- | :- | :- | | Intersection Over Union (IOU) | A ratio between the groundtruth and predicted regions of an image, measured as a percentage, grouped by class. |$\dfrac{area( prediction \cap groundtruth )}{area( prediction \cup groundtruth )}$ | -| Mean IOU | The average of IOUs, calculated over several different classes. | $\dfrac{1}{\text{number of classes}} \sum\limits_{c \in classes} IOU_{c}$ | +| Mean IOU | The average of IOU across labels, grouped by label key. 
| $\dfrac{1}{\text{number of labels}} \sum\limits_{label \in labels} IOU_{c}$ | # Appendix: Metric Calculations diff --git a/integration_tests/client/filtering/test_geospatial_filtering.py b/integration_tests/client/filtering/test_geospatial_filtering.py index 521a29f82..53a6e2b77 100644 --- a/integration_tests/client/filtering/test_geospatial_filtering.py +++ b/integration_tests/client/filtering/test_geospatial_filtering.py @@ -133,7 +133,7 @@ def test_geospatial_filter( }, ) assert eval_job.wait_for_completion(timeout=30) == EvaluationStatus.DONE - assert len(eval_job.metrics) == 12 + assert len(eval_job.metrics) == 16 # passing in an incorrectly-formatted geojson dict should return a ValueError geospatial_metadatum = Datum.metadata["geospatial"] @@ -158,7 +158,7 @@ def test_geospatial_filter( assert eval_job.datum_filter.datum_metadata["geospatial"] == [ Constraint(value=geodict, operator="intersect") ] - assert len(eval_job.metrics) == 12 + assert len(eval_job.metrics) == 16 # filtering by model is allowed, this is the equivalent of requesting.. # "Give me the dataset that model A has operated over." diff --git a/integration_tests/client/metrics/test_detection.py b/integration_tests/client/metrics/test_detection.py index cd358dab4..44f797a8a 100644 --- a/integration_tests/client/metrics/test_detection.py +++ b/integration_tests/client/metrics/test_detection.py @@ -85,15 +85,19 @@ def test_evaluate_detection( }, { "type": "mAP", - "parameters": {"iou": 0.1}, + "parameters": {"iou": 0.1, "label_key": "k1"}, "value": 0.504950495049505, }, { "type": "mAP", - "parameters": {"iou": 0.6}, + "parameters": {"iou": 0.6, "label_key": "k1"}, "value": 0.504950495049505, }, - {"type": "mAR", "parameters": {"ious": [0.1, 0.6]}, "value": 0.5}, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.5, + }, { "type": "APAveragedOverIOUs", "parameters": {"ious": [0.1, 0.6]}, @@ -102,7 +106,7 @@ def test_evaluate_detection( }, { "type": "mAPAveragedOverIOUs", - "parameters": {"ious": [0.1, 0.6]}, + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, "value": 0.504950495049505, }, ] @@ -130,6 +134,7 @@ def test_evaluate_detection( assert result_dict["meta"]["annotations"] == 3 assert result_dict["meta"]["duration"] <= 5 result_dict.pop("meta") + actual_metrics = result_dict.pop("metrics") assert result_dict == { "id": eval_job.id, @@ -150,11 +155,14 @@ def test_evaluate_detection( "pr_curve_iou_threshold": 0.5, }, "status": EvaluationStatus.DONE.value, - "metrics": expected_metrics, "confusion_matrices": [], "missing_pred_labels": [], "ignored_pred_labels": [], } + for m in actual_metrics: + assert m in expected_metrics + for m in expected_metrics: + assert m in actual_metrics # test evaluating a job using a `Label.labels` filter eval_job_value_filter_using_in_ = model.evaluate_detection( @@ -170,7 +178,11 @@ def test_evaluate_detection( eval_job_value_filter_using_in_.wait_for_completion(timeout=30) == EvaluationStatus.DONE ) - assert eval_job_value_filter_using_in_.metrics == result.metrics + + for m in eval_job_value_filter_using_in_.metrics: + assert m in result.metrics + for m in result.metrics: + assert m in eval_job_value_filter_using_in_.metrics # same as the above, but not using the in_ operator eval_job_value_filter = model.evaluate_detection( @@ -186,7 +198,11 @@ def test_evaluate_detection( eval_job_value_filter.wait_for_completion(timeout=30) == EvaluationStatus.DONE ) - assert eval_job_value_filter.metrics == result.metrics + + for m in 
eval_job_value_filter.metrics: + assert m in result.metrics + for m in result.metrics: + assert m in eval_job_value_filter.metrics # assert that this evaluation returns no metrics as there aren't any # Labels with key=k1 and value=v2 @@ -231,6 +247,7 @@ def test_evaluate_detection( eval_job_bounded_area_10_2000.to_dict() ) eval_job_bounded_area_10_2000_dict.pop("meta") + actual_metrics = eval_job_bounded_area_10_2000_dict.pop("metrics") assert eval_job_bounded_area_10_2000_dict == { "id": eval_job_bounded_area_10_2000.id, "model_name": model_name, @@ -260,12 +277,16 @@ def test_evaluate_detection( "pr_curve_iou_threshold": 0.5, }, "status": EvaluationStatus.DONE.value, - "metrics": expected_metrics, "confusion_matrices": [], "missing_pred_labels": [], "ignored_pred_labels": [], } + for m in actual_metrics: + assert m in expected_metrics + for m in expected_metrics: + assert m in actual_metrics + # now check we get different things by setting the thresholds accordingly # min area threshold should divide the set of annotations eval_job_min_area_1200 = model.evaluate_detection( @@ -420,7 +441,10 @@ def test_evaluate_detection( "ignored_pred_labels": [], } assert bounded_area_metrics != expected_metrics - assert bounded_area_metrics == min_area_1200_metrics + for m in bounded_area_metrics: + assert m in min_area_1200_metrics + for m in min_area_1200_metrics: + assert m in bounded_area_metrics # test accessing these evaluations via the dataset all_evals = dataset.get_evaluations() @@ -582,7 +606,10 @@ def test_evaluate_detection_with_json_filters( "ignored_pred_labels": [], } assert bounded_area_metrics != expected_metrics - assert bounded_area_metrics == min_area_1200_metrics + for m in bounded_area_metrics: + assert m in min_area_1200_metrics + for m in min_area_1200_metrics: + assert m in bounded_area_metrics def test_get_evaluations( @@ -624,12 +651,6 @@ def test_get_evaluations( "value": 0.504950495049505, "label": {"key": "k1", "value": "v1"}, }, - { - "type": "AP", - "parameters": {"iou": 0.6}, - "value": 0.504950495049505, - "label": {"key": "k1", "value": "v1"}, - }, { "type": "AR", "parameters": {"ious": [0.1, 0.6]}, @@ -638,24 +659,34 @@ def test_get_evaluations( }, { "type": "mAP", - "parameters": {"iou": 0.1}, + "parameters": {"iou": 0.6, "label_key": "k1"}, "value": 0.504950495049505, }, { - "type": "mAP", - "parameters": {"iou": 0.6}, + "type": "APAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6]}, "value": 0.504950495049505, + "label": {"key": "k1", "value": "v1"}, }, - {"type": "mAR", "parameters": {"ious": [0.1, 0.6]}, "value": 0.5}, { - "type": "APAveragedOverIOUs", - "parameters": {"ious": [0.1, 0.6]}, + "type": "AP", + "parameters": {"iou": 0.6}, "value": 0.504950495049505, "label": {"key": "k1", "value": "v1"}, }, + { + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.5, + }, { "type": "mAPAveragedOverIOUs", - "parameters": {"ious": [0.1, 0.6]}, + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, "value": 0.504950495049505, }, ] @@ -663,13 +694,28 @@ def test_get_evaluations( second_model_expected_metrics = [ { "type": "AP", - "parameters": {"iou": 0.1}, + "parameters": {"iou": 0.6}, "value": 0.0, "label": {"key": "k1", "value": "v1"}, }, + { + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "k1"}, + "value": 0.0, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.0, + }, 
+ { + "type": "mAPAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.0, + }, { "type": "AP", - "parameters": {"iou": 0.6}, + "parameters": {"iou": 0.1}, "value": 0.0, "label": {"key": "k1", "value": "v1"}, }, @@ -679,19 +725,16 @@ def test_get_evaluations( "value": 0.0, "label": {"key": "k1", "value": "v1"}, }, - {"type": "mAP", "parameters": {"iou": 0.1}, "value": 0.0}, - {"type": "mAP", "parameters": {"iou": 0.6}, "value": 0.0}, - {"type": "mAR", "parameters": {"ious": [0.1, 0.6]}, "value": 0.0}, { - "type": "APAveragedOverIOUs", - "parameters": {"ious": [0.1, 0.6]}, + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "k1"}, "value": 0.0, - "label": {"key": "k1", "value": "v1"}, }, { - "type": "mAPAveragedOverIOUs", + "type": "APAveragedOverIOUs", "parameters": {"ious": [0.1, 0.6]}, "value": 0.0, + "label": {"key": "k1", "value": "v1"}, }, ] @@ -705,7 +748,10 @@ def test_get_evaluations( assert len(evaluations) == 1 assert len(evaluations[0].metrics) - assert evaluations[0].metrics == expected_metrics + for m in evaluations[0].metrics: + assert m in expected_metrics + for m in expected_metrics: + assert m in evaluations[0].metrics evaluations_by_evaluation_id = client.get_evaluations( evaluation_ids=eval_job.id # type: ignore - purposefully throwing an error @@ -739,7 +785,10 @@ def test_get_evaluations( second_model_evaluations = client.get_evaluations(models=["second_model"]) assert len(second_model_evaluations) == 1 - assert second_model_evaluations[0].metrics == second_model_expected_metrics + for m in second_model_evaluations[0].metrics: + assert m in second_model_expected_metrics + for m in second_model_expected_metrics: + assert m in second_model_evaluations[0].metrics both_evaluations = client.get_evaluations(datasets=["test_dataset"]) @@ -751,9 +800,15 @@ def test_get_evaluations( model_name, ] if evaluation.model_name == model_name: - assert evaluation.metrics == expected_metrics + for m in evaluation.metrics: + assert m in expected_metrics + for m in expected_metrics: + assert m in evaluation.metrics elif evaluation.model_name == "second_model": - assert evaluation.metrics == second_model_expected_metrics + for m in evaluation.metrics: + assert m in second_model_expected_metrics + for m in second_model_expected_metrics: + assert m in evaluation.metrics # should be equivalent since there are only two models attributed to this dataset both_evaluations_from_model_names = client.get_evaluations( @@ -1002,17 +1057,66 @@ def test_evaluate_detection_with_label_maps( "value": 0.0, "label": {"key": "class", "value": "siamese cat"}, }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.5, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k2"}, + "value": 0.0, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "class_name"}, + "value": 0.0, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "class"}, + "value": 0.0, + }, { "type": "mAP", - "parameters": {"iou": 0.1}, - "value": 0.100990099009901, + "parameters": {"iou": 0.1, "label_key": "class"}, + "value": 0.0, }, { "type": "mAP", - "parameters": {"iou": 0.6}, - "value": 0.100990099009901, + "parameters": {"iou": 0.6, "label_key": "class"}, + "value": 0.0, + }, + { + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "class_name"}, + "value": 0.0, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "class_name"}, + "value": 0.0, + }, + { + "type": "mAP", 
+ "parameters": {"iou": 0.1, "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "k2"}, + "value": 0.0, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "k2"}, + "value": 0.0, }, - {"type": "mAR", "parameters": {"ious": [0.1, 0.6]}, "value": 0.1}, { "type": "APAveragedOverIOUs", "parameters": {"ious": [0.1, 0.6]}, @@ -1045,8 +1149,23 @@ def test_evaluate_detection_with_label_maps( }, { "type": "mAPAveragedOverIOUs", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.100990099009901, + "parameters": {"ious": [0.1, 0.6], "label_key": "class"}, + "value": 0.0, + }, + { + "type": "mAPAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6], "label_key": "class_name"}, + "value": 0.0, + }, + { + "type": "mAPAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAPAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6], "label_key": "k2"}, + "value": 0.0, }, ] @@ -1198,18 +1317,83 @@ def test_evaluate_detection_with_label_maps( }, { "type": "mAP", - "parameters": {"iou": 0.1}, - "value": 0.28052805280528054, + "parameters": {"iou": 0.1, "label_key": "class"}, + "value": 0.33663366336633666, }, { "type": "mAP", - "parameters": {"iou": 0.6}, - "value": 0.28052805280528054, + "parameters": {"iou": 0.1, "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "k2"}, + "value": 0.0, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "class"}, + "value": 0.33663366336633666, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "k2"}, + "value": 0.0, + }, + { + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "class"}, + "value": 0.33663366336633666, + }, + { + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "k2"}, + "value": 0.0, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "class"}, + "value": 0.33663366336633666, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "k2"}, + "value": 0.0, }, { "type": "mAR", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.27777777777777773, + "parameters": {"ious": [0.1, 0.6], "label_key": "class"}, + "value": 0.3333333333333333, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.5, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "class_name"}, + "value": -1.0, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k2"}, + "value": 0.0, }, { "type": "APAveragedOverIOUs", @@ -1231,8 +1415,18 @@ def test_evaluate_detection_with_label_maps( }, { "type": "mAPAveragedOverIOUs", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.28052805280528054, + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAPAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6], "label_key": "class"}, + "value": 0.33663366336633666, + }, + { + "type": "mAPAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6], "label_key": "k2"}, + 
"value": 0.0, }, ] @@ -1297,6 +1491,55 @@ def test_evaluate_detection_with_label_maps( "value": 0.504950495049505, "label": {"key": "k1", "value": "v1"}, }, + { + "type": "AR", + "parameters": {"ious": [0.1, 0.6]}, + "value": 0.6666666666666666, + "label": {"key": "foo", "value": "bar"}, + }, + { + "type": "AR", + "parameters": {"ious": [0.1, 0.6]}, + "value": 0.5, + "label": {"key": "k1", "value": "v1"}, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "foo"}, + "value": 0.6633663366336634, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "k2"}, + "value": 0.0, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k2"}, + "value": 0.0, + }, + { + "type": "APAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6]}, + "value": 0.6633663366336634, + "label": {"key": "foo", "value": "bar"}, + }, + { + "type": "APAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6]}, + "value": 0.504950495049505, + "label": {"key": "k1", "value": "v1"}, + }, + { + "type": "mAPAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6], "label_key": "k2"}, + "value": 0.0, + }, { "type": "AP", "parameters": {"iou": 0.6}, @@ -1315,12 +1558,6 @@ def test_evaluate_detection_with_label_maps( "value": 0.504950495049505, "label": {"key": "k1", "value": "v1"}, }, - { - "type": "AR", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.6666666666666666, - "label": {"key": "foo", "value": "bar"}, - }, { "type": "AR", "parameters": {"ious": [0.1, 0.6]}, @@ -1328,31 +1565,29 @@ def test_evaluate_detection_with_label_maps( "label": {"key": "k2", "value": "v2"}, }, { - "type": "AR", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.5, - "label": {"key": "k1", "value": "v1"}, + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "foo"}, + "value": 0.6633663366336634, }, { "type": "mAP", - "parameters": {"iou": 0.1}, - "value": 0.3894389438943895, + "parameters": {"iou": 0.1, "label_key": "k2"}, + "value": 0.0, }, { "type": "mAP", - "parameters": {"iou": 0.6}, - "value": 0.3894389438943895, + "parameters": {"iou": 0.1, "label_key": "k1"}, + "value": 0.504950495049505, }, { "type": "mAR", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.38888888888888884, + "parameters": {"ious": [0.1, 0.6], "label_key": "foo"}, + "value": 0.6666666666666666, }, { - "type": "APAveragedOverIOUs", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.6633663366336634, - "label": {"key": "foo", "value": "bar"}, + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.5, }, { "type": "APAveragedOverIOUs", @@ -1361,15 +1596,14 @@ def test_evaluate_detection_with_label_maps( "label": {"key": "k2", "value": "v2"}, }, { - "type": "APAveragedOverIOUs", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.504950495049505, - "label": {"key": "k1", "value": "v1"}, + "type": "mAPAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6], "label_key": "foo"}, + "value": 0.6633663366336634, }, { "type": "mAPAveragedOverIOUs", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.3894389438943895, + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.504950495049505, }, ] @@ -1425,6 +1659,12 @@ def test_evaluate_detection_with_label_maps( "value": 0.6633663366336634, "label": {"key": "foo", "value": "bar"}, }, + { + "type": "AP", + "parameters": {"iou": 0.1}, + "value": 0.504950495049505, + "label": {"key": "k1", "value": "v1"}, + }, { "type": "AP", 
"parameters": {"iou": 0.1}, @@ -1432,22 +1672,32 @@ def test_evaluate_detection_with_label_maps( "label": {"key": "k2", "value": "v2"}, }, { - "type": "AP", - "parameters": {"iou": 0.6}, - "value": 0.6633663366336634, + "type": "AR", + "parameters": {"ious": [0.1, 0.6]}, + "value": 0.3333333333333333, # two missed groundtruth on the first image, and 1 hit for the second image "label": {"key": "foo", "value": "bar"}, }, { - "type": "AP", - "parameters": {"iou": 0.6}, + "type": "AR", + "parameters": {"ious": [0.1, 0.6]}, "value": 0.0, "label": {"key": "k2", "value": "v2"}, }, { - "type": "AP", - "parameters": {"iou": 0.1}, + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "foo"}, + "value": 0.6633663366336634, + }, + { + "type": "mAP", + "parameters": {"iou": 0.6, "label_key": "k1"}, "value": 0.504950495049505, - "label": {"key": "k1", "value": "v1"}, + }, + { + "type": "AP", + "parameters": {"iou": 0.6}, + "value": 0.6633663366336634, + "label": {"key": "foo", "value": "bar"}, }, { "type": "AP", @@ -1456,37 +1706,46 @@ def test_evaluate_detection_with_label_maps( "label": {"key": "k1", "value": "v1"}, }, { - "type": "AR", - "parameters": {"ious": [0.1, 0.6]}, + "type": "AP", + "parameters": {"iou": 0.6}, "value": 0.0, - "label": {"key": "k1", "value": "v1"}, + "label": {"key": "k2", "value": "v2"}, }, { "type": "AR", "parameters": {"ious": [0.1, 0.6]}, "value": 0.0, - "label": {"key": "k2", "value": "v2"}, + "label": {"key": "k1", "value": "v1"}, }, { - "type": "AR", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.3333333333333333, # two missed groundtruth on the first image, and 1 hit for the second image - "label": {"key": "foo", "value": "bar"}, + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "foo"}, + "value": 0.6633663366336634, }, { - "type": "mAR", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.1111111111111111, + "type": "mAP", + "parameters": {"iou": 0.1, "label_key": "k1"}, + "value": 0.504950495049505, }, { "type": "mAP", - "parameters": {"iou": 0.1}, - "value": 0.3894389438943895, + "parameters": {"iou": 0.1, "label_key": "k2"}, + "value": 0.0, }, { "type": "mAP", - "parameters": {"iou": 0.6}, - "value": 0.3894389438943895, + "parameters": {"iou": 0.6, "label_key": "k2"}, + "value": 0.0, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "foo"}, + "value": 0.3333333333333333, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.0, }, { "type": "APAveragedOverIOUs", @@ -1500,6 +1759,16 @@ def test_evaluate_detection_with_label_maps( "value": 0.0, "label": {"key": "k2", "value": "v2"}, }, + { + "type": "mAPAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6], "label_key": "k1"}, + "value": 0.504950495049505, + }, + { + "type": "mAR", + "parameters": {"ious": [0.1, 0.6], "label_key": "k2"}, + "value": 0.0, + }, { "type": "APAveragedOverIOUs", "parameters": {"ious": [0.1, 0.6]}, @@ -1508,8 +1777,13 @@ def test_evaluate_detection_with_label_maps( }, { "type": "mAPAveragedOverIOUs", - "parameters": {"ious": [0.1, 0.6]}, - "value": 0.3894389438943895, + "parameters": {"ious": [0.1, 0.6], "label_key": "foo"}, + "value": 0.6633663366336634, + }, + { + "type": "mAPAveragedOverIOUs", + "parameters": {"ious": [0.1, 0.6], "label_key": "k2"}, + "value": 0.0, }, ] diff --git a/integration_tests/client/metrics/test_segmentation.py b/integration_tests/client/metrics/test_segmentation.py index 0b3cf9831..ec0298aec 100644 --- a/integration_tests/client/metrics/test_segmentation.py +++ 
b/integration_tests/client/metrics/test_segmentation.py @@ -36,7 +36,7 @@ def test_evaluate_segmentation( metrics = eval_job.metrics - assert len(metrics) == 3 + assert len(metrics) == 4 assert set( [ (m["label"]["key"], m["label"]["value"]) @@ -140,7 +140,7 @@ def test_evaluate_segmentation_with_label_maps( metrics = eval_job.metrics - assert len(metrics) == 3 + assert len(metrics) == 4 assert set( [ (m["label"]["key"], m["label"]["value"])
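Note for reviewers: the recurring pattern in this diff is that every mean metric (mAP, mAR, mAPAveragedOverIOUs, mIOU) is now computed per label key instead of globally, by bucketing per-label scores in a nested defaultdict before averaging. The sketch below is illustrative only: `APResult` and `compute_mean_aps` are made-up stand-ins rather than `valor_api` symbols, and it assumes `_average_ignore_minus_one` simply skips the `-1.0` sentinel values (its body is not shown in this diff).

```python
from collections import defaultdict
from dataclasses import dataclass


@dataclass
class APResult:
    """Illustrative stand-in for a per-label AP score (not a valor_api class)."""

    label_key: str    # e.g. "class"
    label_value: str  # e.g. "cat"
    iou: float
    value: float      # -1.0 is used as a "not computable" sentinel


def _average_ignore_minus_one(values: list[float]) -> float:
    """Average values, skipping -1.0 sentinels; return -1.0 if nothing is left."""
    kept = [v for v in values if v != -1.0]
    return sum(kept) / len(kept) if kept else -1.0


def compute_mean_aps(ap_scores: list[APResult]) -> list[dict]:
    """Group AP values by (label_key, iou) and average each bucket into an mAP."""
    buckets: dict[str, dict[float, list[float]]] = defaultdict(
        lambda: defaultdict(list)
    )
    for ap in ap_scores:
        buckets[ap.label_key][ap.iou].append(ap.value)

    return [
        {
            "type": "mAP",
            "parameters": {"iou": iou, "label_key": label_key},
            "value": _average_ignore_minus_one(values),
        }
        for label_key, by_iou in buckets.items()
        for iou, values in by_iou.items()
    ]


if __name__ == "__main__":
    aps = [
        APResult("class", "cat", 0.5, 0.9),
        APResult("class", "dog", 0.5, 0.7),
        APResult("color", "red", 0.5, -1.0),
    ]
    # One mAP per (label_key, iou) pair instead of a single global mAP.
    for metric in compute_mean_aps(aps):
        print(metric)
```

The segmentation path follows the same shape: per-grouper IOUs are collected into `ious_per_grouper_key` and each key's bucket is averaged into its own mIOU.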