9 changes: 8 additions & 1 deletion MetricsReloaded/metrics/pairwise_measures.py
@@ -1200,6 +1200,7 @@ def measured_distance(self):
and masd

"""

if "hd_perc" in self.dict_args.keys():
perc = self.dict_args["hd_perc"]
else:
@@ -1208,13 +1209,19 @@ def measured_distance(self):
if np.sum(self.pred + self.ref) == 0:
warnings.warn("Prediction and reference empty - distances set to 0")
return 0, 0, 0, 0
if np.sum(self.pred) == 0 and np.sum(self.ref) > 0:
warnings.warn("Prediction empty but reference not empty - distances set to NaN; worst case to be assigned at aggregation")
return np.nan, np.nan, np.nan, np.nan
if np.sum(self.ref) == 0 and np.sum(self.pred) > 0:
warnings.warn("Prediction not empty but reference empty - non-existing output; distances set to NaN; worst case to be assigned at aggregation")
return np.nan, np.nan, np.nan, np.nan
(
ref_border_dist,
pred_border_dist,
ref_border,
pred_border,
) = self.border_distance()
print(ref_border_dist)
# print(ref_border_dist)
average_distance = (np.sum(ref_border_dist) + np.sum(pred_border_dist)) / (
np.sum(pred_border + ref_border)
)
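
The NaN values returned above are sentinels: the distance is undefined when exactly one of prediction and reference is empty, so the worst-case penalty is deferred to the aggregation step. A minimal sketch of what that substitution could look like, assuming the image diagonal as the worst case (the helper name aggregate_hd and this choice of worst case are illustrative, not part of MetricsReloaded):

import numpy as np

def aggregate_hd(hd_values, image_shape):
    # Replace NaN distances (empty prediction XOR empty reference) with the
    # image diagonal before averaging, so missed structures are penalised
    # rather than silently dropped.
    worst_case = float(np.linalg.norm(image_shape))
    cleaned = [worst_case if np.isnan(v) else v for v in hd_values]
    return float(np.mean(cleaned))

# Example: a 64x64 image where the second case had an empty prediction.
# aggregate_hd([3.2, np.nan, 5.0], (64, 64))
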
1 change: 1 addition & 0 deletions test/test_metrics/test_calibration_metrics.py
@@ -71,6 +71,7 @@ def test_brier_score():
expected_bs = 0.4
assert_allclose(expected_bs, value_test, atol=0.01)

# To use: SN 2.14, p. 99 of Metrics Reloaded

def test_top_label_classification_error():
ref_tce = [1, 0, 2, 1]
27 changes: 26 additions & 1 deletion test/test_metrics/test_pairwise_measures.py
@@ -46,7 +46,7 @@
ppm212_1 = PM(pred212, ref212)
ppm212_2 = PM(pred212,ref212,dict_args={'boundary_dist':2})

#Data for figure 5c (Hausdoff with annotation error p14 Pitfalls)
#Data for figure 5c (Hausdorff with annotation error p14 Pitfalls)
ref5c = np.zeros([14, 14])
ref5c[1, 1] = 1
ref5c[9:12, 9:12] = 1
@@ -630,6 +630,7 @@ def test_mcc():
assert mcc < 1



def test_distance_empty():
"""
Testing that output is 0 when reference and prediction empty for calculation of distance
@@ -833,6 +834,30 @@ def test_hausdorff_distance_5c():
hausdorff_distance_perc, expected_hausdorff_distance_perc, atol=0.01
)

def test_distance_empty_ref():
"""
Testing that all distances are NaN when the reference is empty but the prediction is not
"""
ppm1 = PM(pred29_1, ref29_1 * 0)
hd, hd_perc, masd, assd = ppm1.measured_distance()
assert np.isnan(hd)
assert np.isnan(hd_perc)
assert np.isnan(masd)
assert np.isnan(assd)

def test_distance_empty_pred():
"""
Testing that all distances are NaN when the prediction is empty but the reference is not
"""
ppm1 = PM(pred29_1 * 0, ref29_1)
hd, hd_perc, masd, assd = ppm1.measured_distance()
assert np.isnan(hd)
assert np.isnan(hd_perc)
assert np.isnan(masd)
assert np.isnan(assd)


def test_distance_empty_pred_and_ref():
"""
Testing that all distances are 0 when both the prediction and the reference are empty
"""
ppm1 = PM(pred29_1 * 0, ref29_1 * 0)
hd, hd_perc, masd, assd = ppm1.measured_distance()
assert hd == 0
assert hd_perc == 0
assert masd == 0
assert assd == 0

def test_boundary_iou():
"""
38 changes: 19 additions & 19 deletions test/test_processes/test_mixed_measures_processes.py
@@ -9,26 +9,26 @@
from sklearn.metrics import cohen_kappa_score as cks
from sklearn.metrics import matthews_corrcoef as mcc

# panoptic quality
pq_pred1 = np.zeros([21, 21])
pq_pred1[5:7, 2:5] = 1
pq_pred2 = np.zeros([21, 21])
pq_pred2[14:18, 4:6] = 1
pq_pred2[16, 3] = 1
pq_pred3 = np.zeros([21, 21])
pq_pred3[14:18, 7:12] = 1
pq_pred4 = np.zeros([21, 21])
pq_pred4[2:8, 13:16] = 1
pq_pred4[2:4, 12] = 1
# Data for panoptic quality, Figure 3.51 p96
pq_pred1 = np.zeros([18, 18])
pq_pred1[3:7, 1:3] = 1
pq_pred1[3:6, 3:7] = 1
pq_pred2 = np.zeros([18, 18])
pq_pred2[13:16, 4:6] = 1
pq_pred3 = np.zeros([18, 18])
pq_pred3[7:12, 13:17] = 1
pq_pred4 = np.zeros([18, 18])
pq_pred4[13:15, 13:17] = 1
pq_pred4[15, 15] = 1

pq_ref1 = np.zeros([21, 21])
pq_ref1[8:11, 3] = 1
pq_ref1[9, 2:5] = 1
pq_ref2 = np.zeros([21, 21])
pq_ref2[14:19, 7:13] = 1
pq_ref3 = np.zeros([21, 21])
pq_ref3[2:7, 14:17] = 1
pq_ref3[2:4, 12:14] = 1
pq_ref1 = np.zeros([18, 18])
pq_ref1[2:7, 1:3] = 1
pq_ref1[2:5, 3:6] = 1
pq_ref2 = np.zeros([18, 18])
pq_ref2[6:12, 12:17] = 1
pq_ref3 = np.zeros([18, 18])
pq_ref3[14:15, 7:10] = 1
pq_ref3[13:16, 8:9] = 1


def test_mismatch_category():
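
These fixtures exercise panoptic quality on the instance layouts of Figure 3.51 (p. 96). For reference, a minimal sketch of the quantity being tested, assuming greedy matching of instances at IoU > 0.5 (the function names iou and panoptic_quality are illustrative, not the actual implementation in the processes module):

import numpy as np

def iou(a, b):
    # Intersection over union of two binary masks.
    inter = np.sum((a > 0) & (b > 0))
    union = np.sum((a > 0) | (b > 0))
    return inter / union if union else 0.0

def panoptic_quality(preds, refs, thr=0.5):
    # Match each predicted instance to the unused reference instance with the
    # highest IoU; matches above the threshold count as true positives.
    matched_ious, used = [], set()
    for p in preds:
        best_j, best_iou = None, 0.0
        for j, r in enumerate(refs):
            if j not in used and iou(p, r) > best_iou:
                best_j, best_iou = j, iou(p, r)
        if best_j is not None and best_iou > thr:
            used.add(best_j)
            matched_ious.append(best_iou)
    tp = len(matched_ious)
    fp = len(preds) - tp
    fn = len(refs) - tp
    # PQ = (sum of IoU over TP) / (TP + 0.5 * FP + 0.5 * FN)
    denom = tp + 0.5 * fp + 0.5 * fn
    return sum(matched_ious) / denom if denom else 0.0

# Example with the fixtures above:
# panoptic_quality([pq_pred1, pq_pred2, pq_pred3, pq_pred4], [pq_ref1, pq_ref2, pq_ref3])
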
6 changes: 5 additions & 1 deletion test/test_utility/test_assignment_localization.py
@@ -27,7 +27,11 @@
pred_boxes_6a = [pred6a1, pred6a2, pred6a3, pred6a4, pred6a5]
ref_boxes_6a = [ref6a1, ref6a2, ref6a3, ref6a4]

#Data from Panoptic Quality - 3.51 p96
# Data from SN 2.17 of Metrics Reloaded



#Data from Panoptic Quality - 3.51 p96 of Pitfalls
#Figure 3.51 p96
pq_pred1 = np.zeros([18, 18])
pq_pred1[ 3:7,1:3] = 1