Commit

adding mAP metric (#394)
jshermeyer committed Aug 3, 2020
1 parent 1386d8c commit c087902
Showing 2 changed files with 146 additions and 9 deletions.
151 changes: 144 additions & 7 deletions solaris/eval/vector.py
@@ -94,7 +94,7 @@ def get_all_objects(proposal_polygons_dir, gt_polygons_dir,


def precision_calc(proposal_polygons_dir, gt_polygons_dir,
prediction_cat_attrib="class", gt_cat_attrib='make',
prediction_cat_attrib="class", gt_cat_attrib='make', confidence_attrib=None,
object_subset=[], threshold=0.5, file_format="geojson"):
""" Using the proposal and ground truth polygons, calculate precision metrics.
Filenames of predictions and ground-truth must be identical. Will only
@@ -111,6 +111,9 @@ def precision_calc(proposal_polygons_dir, gt_polygons_dir,
gt_cat_attrib : str
The column or attribute within the ground truth that
specifies unique classes
confidence_attrib : str
The column or attribute within the proposal polygons that
specifies model confidence for each prediction
object_subset : list
A list or subset of the unique objects that are contained within the
ground truth polygons. If empty, this will be
@@ -128,19 +131,23 @@ def precision_calc(proposal_polygons_dir, gt_polygons_dir,
A list containing the precision score for each class
mPrecision : float
The mean precision score of precision_by_class
confidences : list of lists
All confidences for each object for each class
"""
ious = []
os.chdir(proposal_polygons_dir)
search = "*" + file_format
proposal_geojsons = glob.glob(search)
iou_holder = []
confidences = []
if len(object_subset) == 0:
prop_objs, object_subset, all_objs = get_all_objects(
proposal_polygons_dir, gt_polygons_dir,
prediction_cat_attrib=prediction_cat_attrib,
gt_cat_attrib=gt_cat_attrib, file_format=file_format)
for i in range(len(object_subset)):
iou_holder.append([])
confidences.append([])

for geojson in tqdm(proposal_geojsons):
ground_truth_poly = os.path.join(gt_polygons_dir, geojson)
@@ -149,8 +156,11 @@ def precision_calc(proposal_polygons_dir, gt_polygons_dir,
proposal_gdf = gpd.read_file(geojson)
i = 0
for obj in object_subset:
conf_holder = []
proposal_gdf2 = proposal_gdf[proposal_gdf[prediction_cat_attrib] == obj]
for index, row in (proposal_gdf2.iterrows()):
if confidence_attrib is not None:
conf_holder.append(row[confidence_attrib])
iou_GDF = calculate_iou(row.geometry, ground_truth_gdf)
if 'iou_score' in iou_GDF.columns:
iou = iou_GDF.iou_score.max()
@@ -168,6 +178,9 @@ def precision_calc(proposal_polygons_dir, gt_polygons_dir,
ious.append(iou)
for item in ious:
iou_holder[i].append(item)
if confidence_attrib is not None:
for conf in conf_holder:
confidences[i].append(conf)
ious = []
i += 1
else:
@@ -181,13 +194,17 @@ def precision_calc(proposal_polygons_dir, gt_polygons_dir,
ious.append(0)
for item in ious:
iou_holder[i].append(item)
if confidence_attrib is not None:
for conf in conf_holder:
confidences[i].append(conf)
i += 1
ious = []
precision_by_class = average_score_by_class(iou_holder, threshold=threshold)
precision_by_class = list(np.nan_to_num(precision_by_class))
mPrecision = np.nanmean(precision_by_class)
print("mPrecision:", mPrecision)
return iou_holder, precision_by_class, mPrecision

return iou_holder, precision_by_class, mPrecision, confidences
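
A minimal usage sketch of the updated signature (the directory paths and the "confidence" attribute name below are assumptions; prediction and ground-truth filenames must match):

    from solaris.eval import vector

    # Proposal polygons are assumed to carry a per-feature confidence column.
    iou_holder, precision_by_class, mPrecision, confidences = vector.precision_calc(
        "preds/", "gt/",
        prediction_cat_attrib="class", gt_cat_attrib="make",
        confidence_attrib="confidence",
        threshold=0.5, file_format="geojson")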


def recall_calc(proposal_polygons_dir, gt_polygons_dir,
@@ -286,7 +303,7 @@ def recall_calc(proposal_polygons_dir, gt_polygons_dir,


def mF1(proposal_polygons_dir, gt_polygons_dir, prediction_cat_attrib="class",
gt_cat_attrib='make', object_subset=[], threshold=0.5,
gt_cat_attrib='make', object_subset=[], threshold=0.5, confidence_attrib=None,
file_format="geojson", all_outputs=False):
""" Using the proposal and ground truth polygons, calculate F1 and mF1
metrics. Filenames of predictions and ground-truth must be identical. Will
@@ -311,6 +328,9 @@ def mF1(proposal_polygons_dir, gt_polygons_dir, prediction_cat_attrib="class",
threshold : float
A value between 0.0 and 1.0 that determines the IOU threshold for a
true positive.
confidence_attrib : str
The column or attribute within the proposal polygons that
specifies model confidence for each prediction
file_format : str
The extension or file format for predictions
all_outputs : bool
@@ -336,6 +356,8 @@ def mF1(proposal_polygons_dir, gt_polygons_dir, prediction_cat_attrib="class",
The mean recall score of recall_by_class
object_subset : list
All unique objects that exist in the ground truth polygons
confidences : list of lists
All confidences for each object for each class
if all_outputs is `False`:
mF1_score : float
The mean F1 score of f1s_by_class (only calculated for ground
@@ -356,12 +378,11 @@ def mF1(proposal_polygons_dir, gt_polygons_dir, prediction_cat_attrib="class",
gt_cat_attrib=gt_cat_attrib, object_subset=object_subset,
threshold=threshold, file_format=file_format)
print("calculating precision...")
precision_iou_by_obj, precision_by_class, mPrecision = precision_calc(
precision_iou_by_obj, precision_by_class, mPrecision, confidences = precision_calc(
proposal_polygons_dir, gt_polygons_dir,
prediction_cat_attrib=prediction_cat_attrib,
gt_cat_attrib=gt_cat_attrib, object_subset=object_subset,
threshold=threshold, file_format=file_format)
print("")
threshold=threshold, confidence_attrib=confidence_attrib, file_format=file_format)
print("calculating F1 scores...")
f1s_by_class = []
for recall, precision in zip(recall_by_class, precision_by_class):
@@ -371,6 +392,122 @@ def mF1(proposal_polygons_dir, gt_polygons_dir, prediction_cat_attrib="class",
mF1_score = np.nanmean(f1s_by_class)
print("mF1:", mF1_score)
if all_outputs is True:
return mF1_score, f1s_by_class, precision_iou_by_obj, precision_by_class, mPrecision, recall_iou_by_obj, recall_by_class, mRecall, object_subset
return mF1_score, f1s_by_class, precision_iou_by_obj, precision_by_class, mPrecision, recall_iou_by_obj, recall_by_class, mRecall, object_subset, confidences
else:
return mF1_score, f1s_by_class
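
A hedged sketch of calling mF1 with the new confidence_attrib argument and all_outputs=True (paths and attribute names are placeholders); with all_outputs=True the return tuple now ends with the per-class confidences:

    from solaris.eval import vector

    (mF1_score, f1s_by_class, precision_iou_by_obj, precision_by_class, mPrecision,
     recall_iou_by_obj, recall_by_class, mRecall, object_subset, confidences) = vector.mF1(
        "preds/", "gt/", prediction_cat_attrib="class", gt_cat_attrib="make",
        confidence_attrib="confidence", threshold=0.5, all_outputs=True)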


def mAP_score(proposal_polygons_dir, gt_polygons_dir,
prediction_cat_attrib="class", gt_cat_attrib='make',
object_subset=[], threshold=0.5, confidence_attrib="confidence",
file_format="geojson"):
""" Using the proposal and ground truth polygons calculate the Mean Average
Precision (mAP) and mF1 metrics. Filenames of predictions and ground-truth
must be identical. Will only calculate metric for classes that exist in
the ground truth.
Arguments
---------
proposal_polygons_dir : str
The path that contains any model proposal polygons
gt_polygons_dir : str
The path that contains the ground truth polygons
prediction_cat_attrib : str
The column or attribute within the predictions that specifies
unique classes
gt_cat_attrib : str
The column or attribute within the ground truth that
specifies unique classes
object_subset : list
A list or subset of the unique objects that are contained within the
proposal and ground truth polygons. If empty, this will be
auto-created using all classes that appear in the proposal and
ground truth polygons.
threshold : float
A value between 0.0 and 1.0 that determines the IOU threshold for a
true positive.
confidence_attrib : str
The column or attribute within the proposal polygons that
specifies model confidence for each prediction
file_format : str
The extension or file format for predictions
Returns
---------
mAP : float
The mean average precision score of APs_by_class
APs_by_class : list
A list containing the AP score for each class
mF1 : float
The mean F1 score of f1s_by_class
f1s_by_class : list
A list containing the f1 score for each class
precision_iou_by_obj : list of lists
An iou score for each object per class (precision specific)
precision_by_class : list
A list containing the precision score for each class
mPrecision : float
The mean precision score of precision_by_class
recall_iou_by_obj : list of lists
An iou score for each object per class (recall specific)
recall_by_class : list
A list containing the recall score for each class
mRecall : float
The mean recall score of recall_by_class
object_subset : list
All unique objects that exist in the ground truth polygons
confidences : list of lists
All confidences for each object for each class
"""

mF1_score, f1s_by_class, precision_iou_by_obj, precision_by_class, mPrecision, recall_iou_by_obj, recall_by_class, mRecall, object_subset, confidences = mF1(
proposal_polygons_dir, gt_polygons_dir,
prediction_cat_attrib=prediction_cat_attrib,
gt_cat_attrib=gt_cat_attrib, object_subset=object_subset,
threshold=threshold, confidence_attrib=confidence_attrib,
file_format=file_format, all_outputs=True)

recall_thresholds = np.arange(0, 1.01, 0.01).tolist()
APs_by_class = []
for p_obj_list, c_obj_list, r_obj_list in zip(precision_iou_by_obj, confidences, recall_iou_by_obj):
num_objs = len(r_obj_list)
p_obj_list_sorted = [x for _, x in sorted(zip(c_obj_list, p_obj_list))]
p_obj_list_sorted.reverse()
TPs = []
FPs = []
for p in p_obj_list_sorted:
if p >= threshold:
TPs.append(1)
FPs.append(0)
else:
TPs.append(0)
FPs.append(1)
Acc_TPs = []
Acc_FPs = []
t_sum = 0
f_sum = 0
for t, f in zip(TPs, FPs):
t_sum += t
f_sum += f
Acc_TPs.append(t_sum)
Acc_FPs.append(f_sum)
precisions = []
recalls = []

for aTP, aFP in zip(Acc_TPs, Acc_FPs):
precision = (aTP / (aTP + aFP))
precisions.append(precision)
recall = (aTP / num_objs)
recalls.append(recall)
interp = []
for t in recall_thresholds:
precisions2 = [p for r, p in zip(recalls, precisions) if r >= t]
if len(precisions2) > 0:
interp.append(np.max(precisions2))
else:
interp.append(0)

AP = np.average(interp)
APs_by_class.append(AP)
mAP = np.average(APs_by_class)
print("mAP:", mAP, "@IOU:", threshold)
return mAP, APs_by_class, mF1_score, f1s_by_class, precision_iou_by_obj, precision_by_class, mPrecision, recall_iou_by_obj, recall_by_class, mRecall, object_subset, confidences
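
A usage sketch mirroring the updated test below (paths are placeholders; the proposal polygons are assumed to carry a "confidence" attribute):

    from solaris.eval import vector

    mAP, APs_by_class, mF1_score, f1s_by_class, *rest = vector.mAP_score(
        "preds/", "gt/",
        prediction_cat_attrib="class", gt_cat_attrib="make",
        confidence_attrib="confidence", threshold=0.5)
    print(round(mAP, 2), APs_by_class)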
4 changes: 2 additions & 2 deletions tests/test_eval/vector_test.py
@@ -9,5 +9,5 @@ class TestVectorMetrics(object):
def test_vector_metrics(self):
proposal_polygons_dir = os.path.join(data_dir, "eval_vector/preds/")
gt_polygons_dir = os.path.join(data_dir, "eval_vector/gt/")
mF1_score, f1s_by_class, precision_iou_by_obj, precision_by_class, mPrecision, recall_iou_by_obj, recall_by_class, mRecall, object_subset = vector.mF1(proposal_polygons_dir, gt_polygons_dir, prediction_cat_attrib="class", gt_cat_attrib='make', all_outputs=True)
assert mF1_score.round(2) == 0.83
mAP, APs_by_class, mF1_score, f1s_by_class, precision_iou_by_obj, precision_by_class, mPrecision, recall_iou_by_obj, recall_by_class, mRecall, object_subset, confidences = vector.mAP_score(proposal_polygons_dir, gt_polygons_dir, prediction_cat_attrib="class", gt_cat_attrib='make')
assert mAP.round(2) == 0.85
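
For reference, a self-contained sketch of the interpolation logic used in mAP_score above, run on made-up numbers (the IoU values, confidences, and ground-truth count are purely illustrative):

    import numpy as np

    # Hypothetical single-class example: best IoU and confidence per proposal.
    ious = [0.9, 0.3, 0.7, 0.6]
    scores = [0.95, 0.9, 0.8, 0.4]
    num_gt = 4          # ground-truth objects of this class (recall denominator)
    threshold = 0.5

    # Rank proposals by descending confidence, then accumulate TPs and FPs.
    order = np.argsort(scores)[::-1]
    tps = np.cumsum([1 if ious[k] >= threshold else 0 for k in order])
    fps = np.cumsum([0 if ious[k] >= threshold else 1 for k in order])
    precisions = tps / (tps + fps)
    recalls = tps / num_gt

    # 101-point interpolation: best precision at or beyond each recall level.
    recall_thresholds = np.arange(0, 1.01, 0.01)
    interp = [precisions[recalls >= t].max() if (recalls >= t).any() else 0
              for t in recall_thresholds]
    AP = np.average(interp)   # average precision for this hypothetical class
    print(AP)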
