Skip to content

Commit b687013

Browse files
authored
Match HOTA with official result (#186)
* support computing hota metrics * fix code style for flake8 * update docstring * add unit test for hota metrics * add overall computation for hota * fix invalid denominator * fix unconinious id when computing assa * update README for hota metrics * update README for hota metrics without for loop * fix typo * add detail instruction for hota
1 parent 7210fcc commit b687013

File tree

6 files changed

+365
-38
lines changed

6 files changed

+365
-38
lines changed

Readme.md

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,9 @@ print(mh.list_metrics_markdown())
7171
| pred_frequencies | `pd.Series` Total number of occurrences of individual predictions over all frames. |
7272
| track_ratios | `pd.Series` Ratio of assigned to total appearance count per unique object id. |
7373
| id_global_assignment | `dict` ID measures: Global min-cost assignment for ID measures. |
74+
| deta_alpha | HOTA: Detection Accuracy (DetA) for a given threshold. |
75+
| assa_alpha | HOTA: Association Accuracy (AssA) for a given threshold. |
76+
| hota_alpha | HOTA: Higher Order Tracking Accuracy (HOTA) for a given threshold. |
7477

7578
<a name="MOTChallengeCompatibility"></a>
7679

@@ -362,6 +365,54 @@ OVERALL 80.0% 80.0% 80.0% 80.0% 80.0% 4 2 2 0 2 2 1 1 50.0% 0.275
362365
"""
363366
```
364367

368+
#### [Underdeveloped] Computing HOTA metrics
369+
370+
Computing HOTA metrics is also possible. However, it cannot be used with the `Accumulator` class directly, as HOTA requires computing a reweighting matrix from all the frames at the beginning. Here is an example of how to use it:
371+
372+
```python
373+
import os
374+
import numpy as np
375+
import motmetrics as mm
376+
377+
378+
def compute_motchallenge(dir_name):
379+
# `gt.txt` and `test.txt` should be prepared in MOT15 format
380+
df_gt = mm.io.loadtxt(os.path.join(dir_name, "gt.txt"))
381+
df_test = mm.io.loadtxt(os.path.join(dir_name, "test.txt"))
382+
# Require different thresholds for matching
383+
th_list = np.arange(0.05, 0.99, 0.05)
384+
res_list = mm.utils.compare_to_groundtruth_reweighting(df_gt, df_test, "iou", distth=th_list)
385+
return res_list
386+
387+
# `data_dir` is the directory containing the gt.txt and test.txt files
388+
acc = compute_motchallenge("data_dir")
389+
mh = mm.metrics.create()
390+
391+
summary = mh.compute_many(
392+
acc,
393+
metrics=[
394+
"deta_alpha",
395+
"assa_alpha",
396+
"hota_alpha",
397+
],
398+
    generate_overall=True,  # we only need the `Overall` (averaged) row
399+
)
400+
strsummary = mm.io.render_summary(
401+
summary.iloc[[-1], :], # Use list to preserve `DataFrame` type
402+
formatters=mh.formatters,
403+
namemap={"hota_alpha": "HOTA", "assa_alpha": "ASSA", "deta_alpha": "DETA"},
404+
)
405+
print(strsummary)
406+
"""
407+
# data_dir=motmetrics/data/TUD-Campus
408+
DETA ASSA HOTA
409+
OVERALL 41.8% 36.9% 39.1%
410+
# data_dir=motmetrics/data/TUD-Stadtmitte
411+
DETA ASSA HOTA
412+
OVERALL 39.2% 40.9% 39.8%
413+
"""
414+
```
415+
365416
### Computing distances
366417

367418
Up until this point we assumed the pairwise object/hypothesis distances to be known. Usually this is not the case. You are mostly given either rectangles or points (centroids) of related objects. To compute a distance matrix from them you can use `motmetrics.distance` module as shown below.

motmetrics/distances.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ def boxiou(a, b):
8080
math_util.quiet_divide(i_vol, u_vol))
8181

8282

83-
def iou_matrix(objs, hyps, max_iou=1.):
83+
def iou_matrix(objs, hyps, max_iou=1., return_dist=True):
8484
"""Computes 'intersection over union (IoU)' distance matrix between object and hypothesis rectangles.
8585
8686
The IoU is computed as
@@ -104,11 +104,14 @@ def iou_matrix(objs, hyps, max_iou=1.):
104104
Maximum tolerable overlap distance. Object / hypothesis points
105105
with larger distance are set to np.nan signalling do-not-pair. Defaults
106106
to 0.5
107+
return_dist : bool
108+
If true, return distance matrix. If false, return similarity (IoU) matrix.
107109
108110
Returns
109111
-------
110112
C : NxK array
111113
Distance matrix containing pairwise distances or np.nan.
114+
if `return_dist` is False, then the matrix contains the pairwise IoU.
112115
"""
113116

114117
if np.size(objs) == 0 or np.size(hyps) == 0:
@@ -119,5 +122,7 @@ def iou_matrix(objs, hyps, max_iou=1.):
119122
assert objs.shape[1] == 4
120123
assert hyps.shape[1] == 4
121124
iou = boxiou(objs[:, None], hyps[None, :])
122-
dist = 1 - iou
123-
return np.where(dist > max_iou, np.nan, dist)
125+
if return_dist:
126+
dist = 1 - iou
127+
return np.where(dist > max_iou, np.nan, dist)
128+
return iou

motmetrics/metrics.py

Lines changed: 82 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,14 +9,12 @@
99

1010
# pylint: disable=redefined-outer-name
1111

12-
from __future__ import absolute_import
13-
from __future__ import division
14-
from __future__ import print_function
12+
from __future__ import absolute_import, division, print_function
1513

16-
from collections import OrderedDict
1714
import inspect
1815
import logging
1916
import time
17+
from collections import OrderedDict
2018

2119
import numpy as np
2220
import pandas as pd
@@ -495,6 +493,22 @@ def num_predictions(df, pred_frequencies):
495493
simple_add_func.append(num_predictions)
496494

497495

496+
def num_gt_ids(df):
    """Count the distinct ground-truth ids present in the full event log."""
    # Series.nunique() ignores NaN entries, equivalent to
    # dropna().unique().shape[0].
    return df.full["OId"].nunique()
499+
500+
501+
simple_add_func.append(num_gt_ids)
502+
503+
504+
def num_dt_ids(df):
    """Count the distinct detection (hypothesis) ids present in the full event log."""
    # Series.nunique() ignores NaN entries, equivalent to
    # dropna().unique().shape[0].
    return df.full["HId"].nunique()
507+
508+
509+
simple_add_func.append(num_dt_ids)
510+
511+
498512
def track_ratios(df, obj_frequencies):
499513
"""Ratio of assigned to total appearance count per unique object id."""
500514
tracked = df.noraw[df.noraw.Type != "MISS"]["OId"].value_counts()
@@ -597,6 +611,64 @@ def recall_m(partials, num_detections, num_objects):
597611
return math_util.quiet_divide(num_detections, num_objects)
598612

599613

614+
def deta_alpha(df, num_detections, num_objects, num_false_positives):
    r"""Detection accuracy (DetA) under a specific matching threshold $\alpha$.

    Computed as num_detections / (num_objects + num_false_positives), with the
    denominator clamped to at least 1.
    Source: https://jonathonluiten.medium.com/how-to-evaluate-tracking-with-the-hota-metrics-754036d183e1
    """
    del df  # unused; present only so the metric framework resolves dependencies
    denominator = max(1, num_objects + num_false_positives)
    return math_util.quiet_divide(num_detections, denominator)
620+
621+
622+
def deta_alpha_m(partials):
    """Merge partial results: arithmetic mean of DetA over all partials."""
    total = sum(partial["deta_alpha"] for partial in partials)
    return math_util.quiet_divide(total, len(partials))
627+
628+
629+
def assa_alpha(df, num_detections, num_gt_ids, num_dt_ids):
    r"""Association accuracy (AssA) under a specific threshold $\alpha$.

    Builds a gt-id x hypothesis-id co-occurrence matrix over all non-raw
    events, then averages the per-pair Jaccard association score over all
    matches, normalized by the number of detections.
    Source: https://github.com/JonathonLuiten/TrackEval/blob/12c8791b303e0a0b50f753af204249e622d0281a/trackeval/metrics/hota.py#L107-L108

    NOTE(review): arrays are sized by the maximum id and indexed with id - 1,
    so this assumes ids are positive integers; non-contiguous ids merely
    waste rows/columns — TODO confirm against loader guarantees.
    """
    max_gt_id = int(df.noraw.OId.max())
    max_dt_id = int(df.noraw.HId.max())

    match_count_array = np.zeros((max_gt_id, max_dt_id))
    gt_id_counts = np.zeros((max_gt_id, 1))
    tracker_id_counts = np.zeros((1, max_dt_id))

    # Iterate the three positional columns (Type, OId, HId) directly instead
    # of calling df.noraw.iloc[idx, k] per row -- scalar .iloc access inside a
    # Python loop is very expensive; the counts produced are identical.
    event_types = df.noraw.iloc[:, 0]
    object_ids = df.noraw.iloc[:, 1]
    hypothesis_ids = df.noraw.iloc[:, 2]
    for etype, oid, hid in zip(event_types, object_ids, hypothesis_ids):
        if etype in ("SWITCH", "MATCH"):
            match_count_array[int(oid) - 1, int(hid) - 1] += 1
        if oid == oid:  # non-NaN check (NaN != NaN)
            gt_id_counts[int(oid) - 1] += 1
        if hid == hid:  # non-NaN check
            tracker_id_counts[0, int(hid) - 1] += 1

    # Per-pair Jaccard index of co-occurrence, guarded against a 0 denominator.
    ass_a = match_count_array / np.maximum(1, gt_id_counts + tracker_id_counts - match_count_array)
    return math_util.quiet_divide((ass_a * match_count_array).sum(), max(1, num_detections))
650+
651+
652+
def assa_alpha_m(partials):
    """Merge partial results: arithmetic mean of AssA over all partials."""
    total = sum(partial["assa_alpha"] for partial in partials)
    return math_util.quiet_divide(total, len(partials))
657+
658+
659+
def hota_alpha(df, deta_alpha, assa_alpha):
    r"""HOTA under a specific threshold $\alpha$: geometric mean of DetA and AssA."""
    del df  # unused; present only so the metric framework resolves dependencies
    product = deta_alpha * assa_alpha
    return product ** 0.5
663+
664+
665+
def hota_alpha_m(partials):
    """Merge partial results: arithmetic mean of HOTA over all partials."""
    total = sum(partial["hota_alpha"] for partial in partials)
    return math_util.quiet_divide(total, len(partials))
670+
671+
600672
class DataFrameMap: # pylint: disable=too-few-public-methods
601673
def __init__(self, full, raw, noraw, extra):
602674
self.full = full
@@ -783,6 +855,8 @@ def create():
783855
m.register(num_detections, formatter="{:d}".format)
784856
m.register(num_objects, formatter="{:d}".format)
785857
m.register(num_predictions, formatter="{:d}".format)
858+
m.register(num_gt_ids, formatter="{:d}".format)
859+
m.register(num_dt_ids, formatter="{:d}".format)
786860
m.register(num_unique_objects, formatter="{:d}".format)
787861
m.register(track_ratios)
788862
m.register(mostly_tracked, formatter="{:d}".format)
@@ -802,6 +876,10 @@ def create():
802876
m.register(idr, formatter="{:.1%}".format)
803877
m.register(idf1, formatter="{:.1%}".format)
804878

879+
m.register(deta_alpha, formatter="{:.1%}".format)
880+
m.register(assa_alpha, formatter="{:.1%}".format)
881+
m.register(hota_alpha, formatter="{:.1%}".format)
882+
805883
return m
806884

807885

motmetrics/mot.py

Lines changed: 36 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -134,7 +134,7 @@ def _append_to_events(self, typestr, oid, hid, distance):
134134
self._events['HId'].append(hid)
135135
self._events['D'].append(distance)
136136

137-
def update(self, oids, hids, dists, frameid=None, vf=''):
137+
def update(self, oids, hids, dists, frameid=None, vf='', similartiy_matrix=None, th=None):
138138
"""Updates the accumulator with frame specific objects/detections.
139139
140140
This method generates events based on the following algorithm [1]:
@@ -202,6 +202,12 @@ def update(self, oids, hids, dists, frameid=None, vf=''):
202202
self._append_to_indices(frameid, next(eid))
203203
self._append_to_events('RAW', np.nan, np.nan, np.nan)
204204

205+
# Postcompute the distance matrix if necessary. (e.g., HOTA)
206+
cost_for_matching = dists.copy()
207+
if similartiy_matrix is not None and th is not None:
208+
dists = 1 - similartiy_matrix
209+
dists = np.where(similartiy_matrix < th - np.finfo("float").eps, np.nan, dists)
210+
205211
# There must be at least one RAW event per object and hypothesis.
206212
# Record all finite distances as RAW events.
207213
valid_i, valid_j = np.where(np.isfinite(dists))
@@ -224,34 +230,36 @@ def update(self, oids, hids, dists, frameid=None, vf=''):
224230

225231
if oids.size * hids.size > 0:
226232
# 1. Try to re-establish tracks from correspondences in last update
227-
for i in range(oids.shape[0]):
228-
# No need to check oids_masked[i] here.
229-
if not (oids[i] in self.m and self.last_match[oids[i]] == self.last_update_frameid):
230-
continue
231-
232-
hprev = self.m[oids[i]]
233-
j, = np.where(~hids_masked & (hids == hprev))
234-
if j.shape[0] == 0:
235-
continue
236-
j = j[0]
233+
# ignore this if post processing is performed (e.g., HOTA)
234+
if similartiy_matrix is None or th is None:
235+
for i in range(oids.shape[0]):
236+
# No need to check oids_masked[i] here.
237+
if not (oids[i] in self.m and self.last_match[oids[i]] == self.last_update_frameid):
238+
continue
239+
240+
hprev = self.m[oids[i]]
241+
j, = np.where(~hids_masked & (hids == hprev))
242+
if j.shape[0] == 0:
243+
continue
244+
j = j[0]
245+
246+
if np.isfinite(dists[i, j]):
247+
o = oids[i]
248+
h = hids[j]
249+
oids_masked[i] = True
250+
hids_masked[j] = True
251+
self.m[oids[i]] = hids[j]
237252

238-
if np.isfinite(dists[i, j]):
239-
o = oids[i]
240-
h = hids[j]
241-
oids_masked[i] = True
242-
hids_masked[j] = True
243-
self.m[oids[i]] = hids[j]
244-
245-
self._append_to_indices(frameid, next(eid))
246-
self._append_to_events('MATCH', oids[i], hids[j], dists[i, j])
247-
self.last_match[o] = frameid
248-
self.hypHistory[h] = frameid
253+
self._append_to_indices(frameid, next(eid))
254+
self._append_to_events('MATCH', oids[i], hids[j], dists[i, j])
255+
self.last_match[o] = frameid
256+
self.hypHistory[h] = frameid
249257

250258
# 2. Try to remaining objects/hypotheses
251259
dists[oids_masked, :] = np.nan
252260
dists[:, hids_masked] = np.nan
253261

254-
rids, cids = linear_sum_assignment(dists)
262+
rids, cids = linear_sum_assignment(cost_for_matching)
255263

256264
for i, j in zip(rids, cids):
257265
if not np.isfinite(dists[i, j]):
@@ -265,10 +273,10 @@ def update(self, oids, hids, dists, frameid=None, vf=''):
265273
# self.m[o] != h and
266274
# abs(frameid - self.last_occurrence[o]) <= self.max_switch_time)
267275
switch_condition = (
268-
o in self.m and
269-
self.m[o] != h and
270-
o in self.last_occurrence and # Ensure the object ID 'o' is initialized in last_occurrence
271-
abs(frameid - self.last_occurrence[o]) <= self.max_switch_time
276+
o in self.m and
277+
self.m[o] != h and
278+
o in self.last_occurrence and # Ensure the object ID 'o' is initialized in last_occurrence
279+
abs(frameid - self.last_occurrence[o]) <= self.max_switch_time
272280
)
273281
is_switch = switch_condition
274282
######################################################################
@@ -471,7 +479,7 @@ def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, upd
471479
copy['HId'] = copy['HId'].map(lambda x: hid_map[x], na_action='ignore')
472480
infos['hid_map'] = hid_map
473481

474-
r = pd.concat([r,copy])
482+
r = pd.concat([r, copy])
475483
mapping_infos.append(infos)
476484

477485
if return_mappings:

motmetrics/tests/test_metrics.py

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -537,3 +537,49 @@ def my_motp(df: mm.metrics.DataFrameMap):
537537
)
538538

539539
print(summary)
540+
541+
542+
def test_hota():
    """DetA/AssA/HOTA averaged over IoU thresholds must match TrackEval reference values."""
    expected = {  # Reference numbers produced by TrackEval
        "TUD-Campus": {"hota": 0.3913974378451139, "deta": 0.418047030142763, "assa": 0.36912068120832836},
        "TUD-Stadtmitte": {"hota": 0.3978490169927877, "deta": 0.3922675723693166, "assa": 0.4088407518112996}
    }

    data_dir = "motmetrics/data"

    def load_accumulators(dname):
        # One accumulator per IoU threshold in [0.05, 0.95].
        df_gt = mm.io.loadtxt(os.path.join(dname, "gt.txt"))
        df_test = mm.io.loadtxt(os.path.join(dname, "test.txt"))
        thresholds = np.arange(0.05, 0.99, 0.05)
        return mm.utils.compare_to_groundtruth_reweighting(df_gt, df_test, "iou", distth=thresholds)

    accs = [load_accumulators(os.path.join(data_dir, dname)) for dname in expected]
    mh = mm.metrics.create()

    for dataset_idx, (dname, golden) in enumerate(expected.items()):
        scores = {"deta_alpha": [], "assa_alpha": [], "hota_alpha": []}
        for acc in accs[dataset_idx]:
            summary = mh.compute_many(
                [acc],
                metrics=list(scores),
                names=[dname],
                generate_overall=False,
            )
            for metric, values in scores.items():
                values.append(float(summary[metric].iloc[0]))

        # HOTA-style metrics are the plain mean over all thresholds.
        for short_name, metric in (("deta", "deta_alpha"), ("assa", "assa_alpha"), ("hota", "hota_alpha")):
            mean_score = sum(scores[metric]) / len(scores[metric])
            assert mean_score == approx(golden[short_name])

0 commit comments

Comments
 (0)