# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.
# https://github.com/cheind/py-motmetrics/
#
# MIT License
# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.
# See LICENSE file for terms.
"""Obtain metrics from event logs."""
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import inspect
import logging
import time
import numpy as np
import pandas as pd
from motmetrics import math_util
from motmetrics.lap import linear_sum_assignment
from motmetrics.mot import MOTAccumulator
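# getfullargspec is Python 3 only; fall back to the deprecated getargspec on Python 2.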
try:
_getargspec = inspect.getfullargspec
except AttributeError:
_getargspec = inspect.getargspec
class MetricsHost:
"""Keeps track of metrics and intra metric dependencies."""
def __init__(self):
self.metrics = OrderedDict()
def register(
self,
fnc,
deps="auto",
name=None,
helpstr=None,
formatter=None,
fnc_m=None,
deps_m="auto",
):
"""Register a new metric.
Params
------
fnc : Function
Function that computes the metric to be registered. The number of arguments
is 1 + N, where N is the number of dependencies of the metric to be registered.
The order of the argument passed is `df, result_dep1, result_dep2, ...`.
Kwargs
------
deps : string, list of strings or None, optional
The dependencies of this metric. Each dependency is evaluated and the result
is passed as argument to `fnc` as described above. If None is specified, the
function does not have any dependencies. If a list of strings is given, dependencies
for these metric strings are registered. If 'auto' is passed, the dependencies
are deduced from argument inspection of the method. For this to work the argument
names have to be equal to the intended dependencies.
name : string or None, optional
Name identifier of this metric. If None is passed the name is deduced from
function inspection.
helpstr : string or None, optional
A description of what the metric computes. If no help message is given it
is deduced from the docstring of the function.
formatter: Format object, optional
An optional default formatter when rendering metric results as string. I.e to
render the result `0.35` as `35%` one would pass `{:.2%}.format`
fnc_m : Function or None, optional
Function that merges metric results. The number of arguments
is 1 + N, where N is the number of dependencies of the metric to be registered.
The order of the argument passed is `df, result_dep1, result_dep2, ...`.
"""
assert fnc is not None, "No function given for metric {}".format(name)
if deps is None:
deps = []
elif deps == "auto":
if _getargspec(fnc).defaults is not None:
k = -len(_getargspec(fnc).defaults)
else:
k = len(_getargspec(fnc).args)
deps = _getargspec(fnc).args[1:k] # assumes dataframe as first argument
if name is None:
name = (
fnc.__name__
) # Relies on meaningful function names, i.e don't use for lambdas
if helpstr is None:
helpstr = inspect.getdoc(fnc) if inspect.getdoc(fnc) else "No description."
helpstr = " ".join(helpstr.split())
if fnc_m is None and name + "_m" in globals():
fnc_m = globals()[name + "_m"]
if fnc_m is not None:
if deps_m is None:
deps_m = []
elif deps_m == "auto":
if _getargspec(fnc_m).defaults is not None:
k = -len(_getargspec(fnc_m).defaults)
else:
k = len(_getargspec(fnc_m).args)
deps_m = _getargspec(fnc_m).args[
1:k
] # assumes dataframe as first argument
else:
deps_m = None
self.metrics[name] = {
"name": name,
"fnc": fnc,
"fnc_m": fnc_m,
"deps": deps,
"deps_m": deps_m,
"help": helpstr,
"formatter": formatter,
}
@property
def names(self):
"""Returns the name identifiers of all registered metrics."""
return [v["name"] for v in self.metrics.values()]
@property
def formatters(self):
"""Returns the formatters for all metrics that have associated formatters."""
return {
k: v["formatter"]
for k, v in self.metrics.items()
if v["formatter"] is not None
}
def list_metrics(self, include_deps=False):
"""Returns a dataframe containing names, descriptions and optionally dependencies for each metric."""
cols = ["Name", "Description", "Dependencies"]
if include_deps:
data = [(m["name"], m["help"], m["deps"]) for m in self.metrics.values()]
else:
data = [(m["name"], m["help"]) for m in self.metrics.values()]
cols = cols[:-1]
return pd.DataFrame(data, columns=cols)
def list_metrics_markdown(self, include_deps=False):
"""Returns a markdown ready version of `list_metrics`."""
df = self.list_metrics(include_deps=include_deps)
fmt = [":---" for i in range(len(df.columns))]
df_fmt = pd.DataFrame([fmt], columns=df.columns)
df_formatted = pd.concat([df_fmt, df])
return df_formatted.to_csv(sep="|", index=False)
def compute(
self,
df,
ana=None,
metrics=None,
return_dataframe=True,
return_cached=False,
name=None,
):
"""Compute metrics on the dataframe / accumulator.
Params
------
df : MOTAccumulator or pandas.DataFrame
The dataframe to compute the metrics on
Kwargs
------
ana: dict or None, optional
To cache results for fast computation.
metrics : string, list of string or None, optional
The identifiers of the metrics to be computed. This method will only
compute the minimal set of necessary metrics to fullfill the request.
If None is passed all registered metrics are computed.
return_dataframe : bool, optional
Return the result as pandas.DataFrame (default) or dict.
return_cached : bool, optional
If true all intermediate metrics required to compute the desired metrics are returned as well.
name : string, optional
When returning a pandas.DataFrame this is the index of the row containing
the computed metric values.
"""
if isinstance(df, MOTAccumulator):
df = df.events
if metrics is None:
metrics = motchallenge_metrics
elif isinstance(metrics, str):
metrics = [metrics]
df_map = events_to_df_map(df)
cache = {}
options = {"ana": ana}
for mname in metrics:
cache[mname] = self._compute(
df_map, mname, cache, options, parent="summarize"
)
if name is None:
name = 0
if return_cached:
data = cache
else:
data = OrderedDict([(k, cache[k]) for k in metrics])
ret = pd.DataFrame(data, index=[name]) if return_dataframe else data
return ret
def compute_overall(
self,
partials,
metrics=None,
return_dataframe=True,
return_cached=False,
name=None,
):
"""Compute overall metrics based on multiple results.
Params
------
partials : list of metric results to combine overall
Kwargs
------
metrics : string, list of string or None, optional
The identifiers of the metrics to be computed. This method will only
compute the minimal set of necessary metrics to fullfill the request.
If None is passed all registered metrics are computed.
return_dataframe : bool, optional
Return the result as pandas.DataFrame (default) or dict.
return_cached : bool, optional
If true all intermediate metrics required to compute the desired metrics are returned as well.
name : string, optional
When returning a pandas.DataFrame this is the index of the row containing
the computed metric values.
Returns
-------
df : pandas.DataFrame
A datafrom containing the metrics in columns and names in rows.
"""
if metrics is None:
metrics = motchallenge_metrics
elif isinstance(metrics, str):
metrics = [metrics]
cache = {}
for mname in metrics:
cache[mname] = self._compute_overall(
partials, mname, cache, parent="summarize"
)
if name is None:
name = 0
if return_cached:
data = cache
else:
data = OrderedDict([(k, cache[k]) for k in metrics])
return pd.DataFrame(data, index=[name]) if return_dataframe else data
def compute_many(
self, dfs, anas=None, metrics=None, names=None, generate_overall=False
):
"""Compute metrics on multiple dataframe / accumulators.
Params
------
dfs : list of MOTAccumulator or list of pandas.DataFrame
The data to compute metrics on.
Kwargs
------
anas: dict or None, optional
To cache results for fast computation.
metrics : string, list of string or None, optional
The identifiers of the metrics to be computed. This method will only
compute the minimal set of necessary metrics to fullfill the request.
If None is passed all registered metrics are computed.
names : list of string, optional
The names of individual rows in the resulting dataframe.
generate_overall : boolean, optional
If true resulting dataframe will contain a summary row that is computed
using the same metrics over an accumulator that is the concatentation of
all input containers. In creating this temporary accumulator, care is taken
to offset frame indices avoid object id collisions.
Returns
-------
df : pandas.DataFrame
A datafrom containing the metrics in columns and names in rows.
"""
if metrics is None:
metrics = motchallenge_metrics
elif isinstance(metrics, str):
metrics = [metrics]
assert names is None or len(names) == len(dfs)
st = time.time()
if names is None:
names = list(range(len(dfs)))
if anas is None:
anas = [None] * len(dfs)
partials = [
self.compute(
acc,
ana=analysis,
metrics=metrics,
name=name,
return_cached=True,
return_dataframe=False,
)
for acc, analysis, name in zip(dfs, anas, names)
]
logging.info("partials: %.3f seconds.", time.time() - st)
details = partials
partials = [
pd.DataFrame(OrderedDict([(k, i[k]) for k in metrics]), index=[name])
for i, name in zip(partials, names)
]
if generate_overall:
names = "OVERALL"
# merged, infomap = MOTAccumulator.merge_event_dataframes(dfs, return_mappings = True)
# dfs = merged
# anas = MOTAccumulator.merge_analysis(anas, infomap)
# partials.append(self.compute(dfs, ana=anas, metrics=metrics, name=names)[0])
partials.append(self.compute_overall(details, metrics=metrics, name=names))
logging.info("mergeOverall: %.3f seconds.", time.time() - st)
return pd.concat(partials)
def _compute(self, df_map, name, cache, options, parent=None):
"""Compute metric and resolve dependencies."""
assert name in self.metrics, "Cannot find metric {} required by {}.".format(
name, parent
)
already = cache.get(name, None)
if already is not None:
return already
minfo = self.metrics[name]
vals = []
for depname in minfo["deps"]:
v = cache.get(depname, None)
if v is None:
v = cache[depname] = self._compute(
df_map, depname, cache, options, parent=name
)
vals.append(v)
if _getargspec(minfo["fnc"]).defaults is None:
return minfo["fnc"](df_map, *vals)
else:
return minfo["fnc"](df_map, *vals, **options)
def _compute_overall(self, partials, name, cache, parent=None):
assert name in self.metrics, "Cannot find metric {} required by {}.".format(
name, parent
)
already = cache.get(name, None)
if already is not None:
return already
minfo = self.metrics[name]
vals = []
for depname in minfo["deps_m"]:
v = cache.get(depname, None)
if v is None:
v = cache[depname] = self._compute_overall(
partials, depname, cache, parent=name
)
vals.append(v)
assert minfo["fnc_m"] is not None, "merge function for metric %s is None" % name
return minfo["fnc_m"](partials, *vals)
simple_add_func = []
def num_frames(df):
"""Total number of frames."""
return df.full.index.get_level_values(0).unique().shape[0]
simple_add_func.append(num_frames)
def obj_frequencies(df):
"""Total number of occurrences of individual objects over all frames."""
return df.noraw.OId.value_counts()
def pred_frequencies(df):
"""Total number of occurrences of individual predictions over all frames."""
return df.noraw.HId.value_counts()
def num_unique_objects(df, obj_frequencies):
"""Total number of unique object ids encountered."""
del df # unused
return len(obj_frequencies)
simple_add_func.append(num_unique_objects)
def num_matches(df):
"""Total number matches."""
return df.noraw.Type.isin(["MATCH"]).sum()
simple_add_func.append(num_matches)
def num_switches(df):
"""Total number of track switches."""
return df.noraw.Type.isin(["SWITCH"]).sum()
simple_add_func.append(num_switches)
def num_transfer(df):
"""Total number of track transfer."""
return df.extra.Type.isin(["TRANSFER"]).sum()
simple_add_func.append(num_transfer)
def num_ascend(df):
"""Total number of track ascend."""
return df.extra.Type.isin(["ASCEND"]).sum()
simple_add_func.append(num_ascend)
def num_migrate(df):
"""Total number of track migrate."""
return df.extra.Type.isin(["MIGRATE"]).sum()
simple_add_func.append(num_migrate)
def num_false_positives(df):
"""Total number of false positives (false-alarms)."""
return df.noraw.Type.isin(["FP"]).sum()
simple_add_func.append(num_false_positives)
def num_misses(df):
"""Total number of misses."""
return df.noraw.Type.isin(["MISS"]).sum()
simple_add_func.append(num_misses)
def num_detections(df, num_matches, num_switches):
"""Total number of detected objects including matches and switches."""
del df # unused
return num_matches + num_switches
simple_add_func.append(num_detections)
def num_objects(df, obj_frequencies):
"""Total number of unique object appearances over all frames."""
del df # unused
return obj_frequencies.sum()
simple_add_func.append(num_objects)
def num_predictions(df, pred_frequencies):
"""Total number of unique prediction appearances over all frames."""
del df # unused
return pred_frequencies.sum()
simple_add_func.append(num_predictions)
def track_ratios(df, obj_frequencies):
"""Ratio of assigned to total appearance count per unique object id."""
tracked = df.noraw[df.noraw.Type != "MISS"]["OId"].value_counts()
return tracked.div(obj_frequencies).fillna(0.0)
def mostly_tracked(df, track_ratios):
"""Number of objects tracked for at least 80 percent of lifespan."""
del df # unused
return track_ratios[track_ratios >= 0.8].count()
simple_add_func.append(mostly_tracked)
def partially_tracked(df, track_ratios):
"""Number of objects tracked between 20 and 80 percent of lifespan."""
del df # unused
return track_ratios[(track_ratios >= 0.2) & (track_ratios < 0.8)].count()
simple_add_func.append(partially_tracked)
def mostly_lost(df, track_ratios):
"""Number of objects tracked less than 20 percent of lifespan."""
del df # unused
return track_ratios[track_ratios < 0.2].count()
simple_add_func.append(mostly_lost)
def num_fragmentations(df, obj_frequencies):
"""Total number of switches from tracked to not tracked."""
fra = 0
for o in obj_frequencies.index:
        # Find first and last time the object was not missed (track span). Then count
        # the number of switches from NOT MISS to MISS state.
dfo = df.noraw[df.noraw.OId == o]
notmiss = dfo[dfo.Type != "MISS"]
if len(notmiss) == 0:
continue
first = notmiss.index[0]
last = notmiss.index[-1]
diffs = dfo.loc[first:last].Type.apply(lambda x: 1 if x == "MISS" else 0).diff()
fra += diffs[diffs == 1].count()
return fra
simple_add_func.append(num_fragmentations)
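# Worked sketch: if an object's event types over its track span are
# [MATCH, MISS, MISS, MATCH, MISS, MATCH], the MISS indicator vector is
# [0, 1, 1, 0, 1, 0] and its first difference contains two +1 steps, so the
# object contributes 2 fragmentations.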
def motp(df, num_detections):
"""Multiple object tracker precision."""
return math_util.quiet_divide(df.noraw["D"].sum(), num_detections)
def motp_m(partials, num_detections):
res = 0
for v in partials:
res += v["motp"] * v["num_detections"]
return math_util.quiet_divide(res, num_detections)
def mota(df, num_misses, num_switches, num_false_positives, num_objects):
"""Multiple object tracker accuracy."""
del df # unused
return 1.0 - math_util.quiet_divide(
num_misses + num_switches + num_false_positives, num_objects
)
def mota_m(partials, num_misses, num_switches, num_false_positives, num_objects):
del partials # unused
return 1.0 - math_util.quiet_divide(
num_misses + num_switches + num_false_positives, num_objects
)
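# Numeric sketch: with 100 ground-truth object appearances, 8 misses, 2 switches
# and 5 false positives, MOTA = 1 - (8 + 2 + 5) / 100 = 0.85.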
def precision(df, num_detections, num_false_positives):
"""Number of detected objects over sum of detected and false positives."""
del df # unused
return math_util.quiet_divide(num_detections, num_false_positives + num_detections)
def precision_m(partials, num_detections, num_false_positives):
del partials # unused
return math_util.quiet_divide(num_detections, num_false_positives + num_detections)
def recall(df, num_detections, num_objects):
"""Number of detections over number of objects."""
del df # unused
return math_util.quiet_divide(num_detections, num_objects)
def recall_m(partials, num_detections, num_objects):
del partials # unused
return math_util.quiet_divide(num_detections, num_objects)
class DataFrameMap:  # pylint: disable=too-few-public-methods
    """Provides views of the event log: `full` (all events), `raw` (RAW events
    only), `noraw` (all events except RAW, ASCEND, TRANSFER and MIGRATE) and
    `extra` (all events except RAW)."""
def __init__(self, full, raw, noraw, extra):
self.full = full
self.raw = raw
self.noraw = noraw
self.extra = extra
def events_to_df_map(df):
    """Splits the event dataframe into the views described by `DataFrameMap`."""
raw = df[df.Type == "RAW"]
noraw = df[
(df.Type != "RAW")
& (df.Type != "ASCEND")
& (df.Type != "TRANSFER")
& (df.Type != "MIGRATE")
]
extra = df[df.Type != "RAW"]
df_map = DataFrameMap(full=df, raw=raw, noraw=noraw, extra=extra)
return df_map
def extract_counts_from_df_map(df):
    """Returns the per-id appearance counts of the event log.

    Returns:
        Tuple (ocs, hcs, tps).
        ocs: Dict from object id to count.
        hcs: Dict from hypothesis id to count.
        tps: Dict from (object id, hypothesis id) to true-positive count.
        The ids are arbitrary; they might NOT be consecutive integers from 0.
    """
oids = df.full["OId"].dropna().unique()
hids = df.full["HId"].dropna().unique()
flat = df.raw.reset_index()
# Exclude events that do not belong to either set.
flat = flat[flat["OId"].isin(oids) | flat["HId"].isin(hids)]
# Count number of frames where each (non-empty) OId and HId appears.
ocs = flat.set_index("OId")["FrameId"].groupby("OId").nunique().to_dict()
hcs = flat.set_index("HId")["FrameId"].groupby("HId").nunique().to_dict()
# Select three columns of interest and index by ('OId', 'HId').
dists = flat[["OId", "HId", "D"]].set_index(["OId", "HId"]).dropna()
# Count events with non-empty distance for each pair.
tps = dists.groupby(["OId", "HId"])["D"].count().to_dict()
return ocs, hcs, tps
def id_global_assignment(df, ana=None):
"""ID measures: Global min-cost assignment for ID measures."""
# pylint: disable=too-many-locals
del ana # unused
ocs, hcs, tps = extract_counts_from_df_map(df)
oids = sorted(ocs.keys())
hids = sorted(hcs.keys())
oids_idx = dict((o, i) for i, o in enumerate(oids))
hids_idx = dict((h, i) for i, h in enumerate(hids))
no = len(ocs)
nh = len(hcs)
fpmatrix = np.full((no + nh, no + nh), 0.0)
fnmatrix = np.full((no + nh, no + nh), 0.0)
fpmatrix[no:, :nh] = np.nan
fnmatrix[:no, nh:] = np.nan
for oid, oc in ocs.items():
r = oids_idx[oid]
fnmatrix[r, :nh] = oc
fnmatrix[r, nh + r] = oc
for hid, hc in hcs.items():
c = hids_idx[hid]
fpmatrix[:no, c] = hc
fpmatrix[c + no, c] = hc
for (oid, hid), ex in tps.items():
r = oids_idx[oid]
c = hids_idx[hid]
fpmatrix[r, c] -= ex
fnmatrix[r, c] -= ex
costs = fpmatrix + fnmatrix
rids, cids = linear_sum_assignment(costs)
return {
"fpmatrix": fpmatrix,
"fnmatrix": fnmatrix,
"rids": rids,
"cids": cids,
"costs": costs,
"min_cost": costs[rids, cids].sum(),
}
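# Worked sketch of the construction above for a single object seen in 3 frames
# and a single hypothesis seen in 3 frames that overlap in 2 frames, i.e.
# ocs = {0: 3}, hcs = {0: 3}, tps = {(0, 0): 2}:
#
#   fpmatrix = [[1., 0.],   fnmatrix = [[1., 3.],   costs = [[2., 3.],
#               [3., 0.]]               [0., 0.]]            [3., 0.]]
#
# The min-cost assignment selects (0, 0) and (1, 1), so idfp = 1, idfn = 1 and
# idtp = num_objects - idfn = 3 - 1 = 2.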
def idfp(df, id_global_assignment):
"""ID measures: Number of false positive matches after global min-cost matching."""
del df # unused
rids, cids = id_global_assignment["rids"], id_global_assignment["cids"]
return id_global_assignment["fpmatrix"][rids, cids].sum()
simple_add_func.append(idfp)
def idfn(df, id_global_assignment):
"""ID measures: Number of false negatives matches after global min-cost matching."""
del df # unused
rids, cids = id_global_assignment["rids"], id_global_assignment["cids"]
return id_global_assignment["fnmatrix"][rids, cids].sum()
simple_add_func.append(idfn)
def idtp(df, id_global_assignment, num_objects, idfn):
"""ID measures: Number of true positives matches after global min-cost matching."""
del df, id_global_assignment # unused
return num_objects - idfn
simple_add_func.append(idtp)
def idp(df, idtp, idfp):
"""ID measures: global min-cost precision."""
del df # unused
return math_util.quiet_divide(idtp, idtp + idfp)
def idp_m(partials, idtp, idfp):
del partials # unused
return math_util.quiet_divide(idtp, idtp + idfp)
def idr(df, idtp, idfn):
"""ID measures: global min-cost recall."""
del df # unused
return math_util.quiet_divide(idtp, idtp + idfn)
def idr_m(partials, idtp, idfn):
del partials # unused
return math_util.quiet_divide(idtp, idtp + idfn)
def idf1(df, idtp, num_objects, num_predictions):
"""ID measures: global min-cost F1 score."""
del df # unused
return math_util.quiet_divide(2 * idtp, num_objects + num_predictions)
def idf1_m(partials, idtp, num_objects, num_predictions):
del partials # unused
return math_util.quiet_divide(2 * idtp, num_objects + num_predictions)
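# For every function collected in simple_add_func, generate a `<name>_m` merge
# function that sums the metric over all partial results. The factory function
# is needed so that each generated closure captures its own metric name. At
# module scope locals() is the module's global namespace, so the generated
# functions are found by register()'s `name + "_m"` lookup.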
for one in simple_add_func:
name = one.__name__
def getSimpleAdd(nm):
def simpleAddHolder(partials):
res = 0
for v in partials:
res += v[nm]
return res
return simpleAddHolder
locals()[name + "_m"] = getSimpleAdd(name)
def create():
"""Creates a MetricsHost and populates it with default metrics."""
m = MetricsHost()
m.register(num_frames, formatter="{:d}".format)
m.register(obj_frequencies, formatter="{:d}".format)
m.register(pred_frequencies, formatter="{:d}".format)
m.register(num_matches, formatter="{:d}".format)
m.register(num_switches, formatter="{:d}".format)
m.register(num_transfer, formatter="{:d}".format)
m.register(num_ascend, formatter="{:d}".format)
m.register(num_migrate, formatter="{:d}".format)
m.register(num_false_positives, formatter="{:d}".format)
m.register(num_misses, formatter="{:d}".format)
m.register(num_detections, formatter="{:d}".format)
m.register(num_objects, formatter="{:d}".format)
m.register(num_predictions, formatter="{:d}".format)
m.register(num_unique_objects, formatter="{:d}".format)
m.register(track_ratios)
m.register(mostly_tracked, formatter="{:d}".format)
m.register(partially_tracked, formatter="{:d}".format)
m.register(mostly_lost, formatter="{:d}".format)
m.register(num_fragmentations)
m.register(motp, formatter="{:.3f}".format)
m.register(mota, formatter="{:.1%}".format)
m.register(precision, formatter="{:.1%}".format)
m.register(recall, formatter="{:.1%}".format)
m.register(id_global_assignment)
m.register(idfp)
m.register(idfn)
m.register(idtp)
m.register(idp, formatter="{:.1%}".format)
m.register(idr, formatter="{:.1%}".format)
m.register(idf1, formatter="{:.1%}".format)
return m
motchallenge_metrics = [
"idf1",
"idp",
"idr",
"recall",
"precision",
"num_unique_objects",
"mostly_tracked",
"partially_tracked",
"mostly_lost",
"num_false_positives",
"num_misses",
"num_switches",
"num_fragmentations",
"mota",
"motp",
"num_transfer",
"num_ascend",
"num_migrate",
]
"""A list of all metrics from MOTChallenge."""