[Improve] Use metric_dict to replace hardcode arg in evaluate #286

Merged 11 commits on Oct 31, 2020
13 changes: 8 additions & 5 deletions mmaction/datasets/activitynet_dataset.py
@@ -199,7 +199,9 @@ def evaluate(
results (list[dict]): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'AR@AN'.
metric_options (dict): Dict for metric options.
metric_options (dict): Dict for metric options. Options are
``max_avg_proposals``, ``temporal_iou_thresholds`` for
``AR@AN``.
logger (logging.Logger | None): Training logger. Defaults: None.

Returns:
@@ -226,10 +228,11 @@ def evaluate(

for metric in metrics:
if metric == 'AR@AN':
temporal_iou_thresholds = metric_options['AR@AN'].get(
'temporal_iou_thresholds')
max_avg_proposals = metric_options['AR@AN'].get(
'max_avg_proposals')
temporal_iou_thresholds = metric_options.setdefault(
'AR@AN', {}).setdefault('temporal_iou_thresholds',
np.linspace(0.5, 0.95, 10))
max_avg_proposals = metric_options.setdefault(
'AR@AN', {}).setdefault('max_avg_proposals', 100)
if isinstance(temporal_iou_thresholds, list):
temporal_iou_thresholds = np.array(temporal_iou_thresholds)

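For context, a minimal sketch of the dict.setdefault pattern the new code relies on (the option values mirror the defaults in the diff; the standalone variables are illustrative only): missing keys are filled in with the defaults, while values the caller already supplied are returned unchanged.

import numpy as np

# No options supplied for 'AR@AN': setdefault inserts the defaults into
# metric_options and returns them.
metric_options = {}
thresholds = metric_options.setdefault('AR@AN', {}).setdefault(
    'temporal_iou_thresholds', np.linspace(0.5, 0.95, 10))
proposals = metric_options.setdefault('AR@AN', {}).setdefault(
    'max_avg_proposals', 100)
assert proposals == 100 and len(thresholds) == 10

# A value supplied by the caller wins: setdefault leaves it untouched.
metric_options = {'AR@AN': {'max_avg_proposals': 50}}
proposals = metric_options.setdefault('AR@AN', {}).setdefault(
    'max_avg_proposals', 100)
assert proposals == 50

One side effect worth noting: because setdefault writes the defaults back, the dict the caller passed in is mutated in place after evaluate() runs.
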
7 changes: 5 additions & 2 deletions mmaction/datasets/base.py
@@ -130,7 +130,8 @@ def evaluate(self,
results (list): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'top_k_accuracy'.
metric_options (dict): Dict for metric options.
metric_options (dict): Dict for metric options. Options are
``topk`` for ``top_k_accuracy``.
Default: ``dict(top_k_accuracy=dict(topk=(1, 5)))``.
logger (logging.Logger | None): Logger for recording.
Default: None.
@@ -167,7 +168,9 @@ def evaluate(self,
print_log(msg, logger=logger)

if metric == 'top_k_accuracy':
topk = metric_options['top_k_accuracy'].get('topk')
topk = metric_options.setdefault('top_k_accuracy',
{}).setdefault(
'topk', (1, 5))
if not isinstance(topk, (int, tuple)):
raise TypeError('topk must be int or tuple of int, '
f'but got {type(topk)}')
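A hedged usage sketch of the base-class change, assuming dataset is an already-built dataset instance and results is the list of model outputs (both are placeholders, not defined in this diff): with the setdefault-based lookup the caller can drop metric_options entirely and still get topk=(1, 5), or override only the option of interest.

# Relies on the default topk=(1, 5) filled in by setdefault.
eval_results = dataset.evaluate(results, metrics='top_k_accuracy')

# Overrides the default with custom top-k values.
eval_results = dataset.evaluate(
    results,
    metrics='top_k_accuracy',
    metric_options=dict(top_k_accuracy=dict(topk=(1, 3, 5))))
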
6 changes: 4 additions & 2 deletions mmaction/datasets/ssn_dataset.py
@@ -409,7 +409,8 @@ def evaluate(self,
results (list[dict]): Output results.
metrics (str | sequence[str]): Metrics to be performed.
Defaults: 'mAP'.
metric_options (dict): Dict for metric options.
metric_options (dict): Dict for metric options. Options are
``eval_dataset`` for ``mAP``.
logger (logging.Logger | None): Logger for recording.
Default: None.

@@ -468,7 +469,8 @@ def evaluate(self,
eval_results = {}
for metric in metrics:
if metric == 'mAP':
eval_dataset = metric_options['mAP'].get('eval_dataset')
eval_dataset = metric_options.setdefault('mAP', {}).setdefault(
'eval_dataset', 'thumos14')
if eval_dataset == 'thumos14':
iou_range = np.arange(0.1, 1.0, .1)
ap_values = eval_ap(plain_detections, all_gts, iou_range)
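Likewise for the SSN dataset, a hedged sketch under the same assumptions (dataset and results are placeholders): 'thumos14' is the value setdefault falls back to, so the two calls below evaluate the same protocol.

# Falls back to eval_dataset='thumos14' via setdefault.
eval_results = dataset.evaluate(results, metrics='mAP')

# Equivalent call with the option spelled out explicitly.
eval_results = dataset.evaluate(
    results,
    metrics='mAP',
    metric_options=dict(mAP=dict(eval_dataset='thumos14')))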