
Commit

fixed bug in averaging in case of AP and MOT metrics
leonid-pishchulin committed Jun 30, 2018
1 parent 60cd97e commit 0268b26
Showing 3 changed files with 12 additions and 11 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -44,6 +44,7 @@ Evaluation requires ground truth (GT) annotations available at [PoseTrack](https
"id": [0],
"x": [394],
"y": [173],
"score": [0.7],
},
{ ... }
]
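The README hunk above documents a new per-keypoint `score` field in the prediction format. As a minimal, hypothetical sketch in Python, a single keypoint entry with the values shown would be:

```python
# Hypothetical single keypoint entry, mirroring the fields and values in the
# README hunk above; keys hidden by the truncated context are omitted.
point = {
    "id": [0],       # keypoint id
    "x": [394],      # x coordinate
    "y": [173],      # y coordinate
    "score": [0.7],  # per-keypoint confidence, added by this commit
}
```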
6 changes: 3 additions & 3 deletions py/evaluateAP.py
@@ -25,11 +25,11 @@ def computeMetrics(scoresAll, labelsAll, nGTall):
apAll[j] = eval_helpers.VOCap(recall, precision) * 100
preAll[j] = precision[len(precision) - 1] * 100
recAll[j] = recall[len(recall) - 1] * 100
- idxs = np.argwhere(~np.isnan(apAll[:,0]))
+ idxs = np.argwhere(~np.isnan(apAll[:nGTall.shape[0],0]))
apAll[nGTall.shape[0]] = apAll[idxs, 0].mean()
- idxs = np.argwhere(~np.isnan(recAll[:,0]))
+ idxs = np.argwhere(~np.isnan(recAll[:nGTall.shape[0],0]))
recAll[nGTall.shape[0]] = recAll[idxs, 0].mean()
- idxs = np.argwhere(~np.isnan(preAll[:,0]))
+ idxs = np.argwhere(~np.isnan(preAll[:nGTall.shape[0],0]))
preAll[nGTall.shape[0]] = preAll[idxs, 0].mean()

return apAll, preAll, recAll
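The fix above matters because `apAll`, `preAll`, and `recAll` each carry one extra row at index `nGTall.shape[0]`, reserved for the average over parts. A minimal sketch of the difference, under the assumption that the arrays are zero-initialized so the reserved row survives the `~np.isnan` filter (sizes and values are made up):

```python
import numpy as np

# Hypothetical reconstruction of the averaging bug: apAll has one extra row
# reserved for the mean over parts. Assuming it starts at 0 rather than NaN,
# it passes the ~np.isnan filter and drags the average down.
nParts = 4
apAll = np.zeros((nParts + 1, 1))             # last row reserved for the mean
apAll[:nParts, 0] = [60.0, 70.0, 80.0, 90.0]  # per-part AP (made-up values)

# Old: the reserved row is included -> (60 + 70 + 80 + 90 + 0) / 5 = 60
idxs = np.argwhere(~np.isnan(apAll[:, 0]))
print(apAll[idxs, 0].mean())                  # 60.0

# New: restrict to the per-part rows before filtering -> 75
idxs = np.argwhere(~np.isnan(apAll[:nParts, 0]))
apAll[nParts] = apAll[idxs, 0].mean()
print(apAll[nParts, 0])                       # 75.0
```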
16 changes: 8 additions & 8 deletions py/evaluateTracking.py
@@ -126,13 +126,13 @@ def computeMetrics(gtFramesAll, motAll, outputDir, bSaveSeq):
numObj)

# average metrics over all joints per sequence
- idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['mota'][0,:]))
+ idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['mota'][0,:nJoints]))
metricsSeqAll[si]['mota'][0,nJoints] = metricsSeqAll[si]['mota'][0,idxs].mean()
- idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['motp'][0,:]))
+ idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['motp'][0,:nJoints]))
metricsSeqAll[si]['motp'][0,nJoints] = metricsSeqAll[si]['motp'][0,idxs].mean()
- idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['pre'][0,:]))
+ idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['pre'][0,:nJoints]))
metricsSeqAll[si]['pre'][0,nJoints] = metricsSeqAll[si]['pre'][0,idxs].mean()
- idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['rec'][0,:]))
+ idxs = np.argwhere(~np.isnan(metricsSeqAll[si]['rec'][0,:nJoints]))
metricsSeqAll[si]['rec'][0,nJoints] = metricsSeqAll[si]['rec'][0,idxs].mean()

metricsSeq = metricsSeqAll[si].copy()
@@ -174,13 +174,13 @@ def computeMetrics(gtFramesAll, motAll, outputDir, bSaveSeq):
numObj)

# average metrics over all joints over all sequences
- idxs = np.argwhere(~np.isnan(metricsFinAll['mota'][0,:]))
+ idxs = np.argwhere(~np.isnan(metricsFinAll['mota'][0,:nJoints]))
metricsFinAll['mota'][0,nJoints] = metricsFinAll['mota'][0,idxs].mean()
- idxs = np.argwhere(~np.isnan(metricsFinAll['motp'][0,:]))
+ idxs = np.argwhere(~np.isnan(metricsFinAll['motp'][0,:nJoints]))
metricsFinAll['motp'][0,nJoints] = metricsFinAll['motp'][0,idxs].mean()
- idxs = np.argwhere(~np.isnan(metricsFinAll['pre'][0,:]))
+ idxs = np.argwhere(~np.isnan(metricsFinAll['pre'][0,:nJoints]))
metricsFinAll['pre'][0,nJoints] = metricsFinAll['pre'][0,idxs].mean()
- idxs = np.argwhere(~np.isnan(metricsFinAll['rec'][0,:]))
+ idxs = np.argwhere(~np.isnan(metricsFinAll['rec'][0,:nJoints]))
metricsFinAll['rec'][0,nJoints] = metricsFinAll['rec'][0,idxs].mean()

metricsFin = metricsFinAll.copy()
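The tracking metrics get the same fix along the joint axis: each metric appears to be a `1 x (nJoints + 1)` row vector whose last column holds the average over joints, so the mean must exclude that reserved column. A short sketch of the corrected pattern with hypothetical values:

```python
import numpy as np

# Sketch of the corrected MOT aggregation, assuming each metric is a
# 1 x (nJoints + 1) row vector whose last column stores the joint average.
nJoints = 4
mota = np.zeros((1, nJoints + 1))
mota[0, :nJoints] = [55.0, 65.0, 75.0, 85.0]  # per-joint MOTA (made up)

# Slice off the reserved column before the NaN filter, as in the diff above.
idxs = np.argwhere(~np.isnan(mota[0, :nJoints]))
mota[0, nJoints] = mota[0, idxs].mean()
print(mota[0, nJoints])                       # 70.0
```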

