[Improve] Update unittest #322

Merged (6 commits) on Nov 11, 2020
28 changes: 25 additions & 3 deletions tests/test_accuracy.py
@@ -5,9 +5,9 @@
from numpy.testing import assert_array_almost_equal, assert_array_equal

from mmaction.core import (average_recall_at_avg_proposals, confusion_matrix,
-                           get_weighted_score, mean_class_accuracy,
-                           mmit_mean_average_precision, pairwise_temporal_iou,
-                           top_k_accuracy)
+                           get_weighted_score, mean_average_precision,
+                           mean_class_accuracy, mmit_mean_average_precision,
+                           pairwise_temporal_iou, top_k_accuracy)


def gt_confusion_matrix(gt_labels, pred_labels, normalize=None):
@@ -235,3 +235,25 @@ def test_get_weighted_score():
x * coeff_a + y * coeff_b for x, y in zip(score_a, score_b)
]
assert np.all(np.isclose(np.array(ground_truth), np.array(weighted_score)))


def test_mean_average_precision():

def content_for_unittest(scores, labels, result):
gt = mean_average_precision(scores, labels)
assert gt == result

scores = [
np.array([0.1, 0.2, 0.3, 0.4]),
np.array([0.2, 0.3, 0.4, 0.1]),
np.array([0.3, 0.4, 0.1, 0.2]),
np.array([0.4, 0.1, 0.2, 0.3])
]

label1 = np.array([[1, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1]])
result1 = 2 / 3
label2 = np.array([[0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]])
result2 = np.mean([0.5, 0.5833333333333333, 0.8055555555555556, 1.0])

content_for_unittest(scores, label1, result1)
content_for_unittest(scores, label2, result2)
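
Editor's note: the expected values above can be reproduced by hand with a minimal per-class average-precision computation. The sketch below is an illustration, not the mmaction implementation (which may treat ties and empty classes differently):

import numpy as np

def _average_precision(scores, labels):
    # Rank predictions from most to least confident, then average the
    # precision measured at each true-positive hit (standard AP).
    order = np.argsort(-scores)
    hits = labels[order]
    precisions = np.cumsum(hits) / (np.arange(len(hits)) + 1)
    return precisions[hits == 1].mean()

def naive_mean_average_precision(scores, labels):
    scores, labels = np.stack(scores), np.stack(labels)
    # One AP per class (column), averaged over classes.
    return np.mean([
        _average_precision(scores[:, i], labels[:, i])
        for i in range(scores.shape[1])
    ])

# naive_mean_average_precision(scores, label2) reproduces
# np.mean([0.5, 0.5833..., 0.8055..., 1.0]) from the test above.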
2 changes: 1 addition & 1 deletion tests/test_models/test_inference.py
@@ -62,7 +62,7 @@ def test_inference_recognizer():
inference_recognizer(model, 'demo/', label_path)

for ops in model.cfg.data.test.pipeline:
-    if ops['type'] == 'TenCrop':
+    if ops['type'] in ('TenCrop', 'ThreeCrop'):
# Use CenterCrop to reduce memory in order to pass CI
ops['type'] = 'CenterCrop'
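
Editor's note: ten-crop style test-time augmentation turns every sampled frame into ten crop views (three for ThreeCrop), so swapping in CenterCrop shrinks the test batch roughly 10x or 3x. A toy illustration of the blow-up:

import torch

clip = torch.zeros(8, 3, 224, 224)            # 8 frames, CHW
ten_crop = clip.repeat_interleave(10, dim=0)  # 80 views fed to the model
center_crop = clip                            # 1 view per frame
print(ten_crop.shape, center_crop.shape)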

106 changes: 106 additions & 0 deletions tests/test_runtime/test_apis_test.py
@@ -0,0 +1,106 @@
import sys
from unittest.mock import MagicMock, Mock, patch

import pytest
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset

from mmaction.apis.test import (collect_results_cpu, multi_gpu_test,
single_gpu_test)


class OldStyleModel(nn.Module):

def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
self.cnt = 0

def forward(self, return_loss, **kwargs):
result = [self.cnt]
self.cnt += 1
return result


class Model(OldStyleModel):

def train_step(self):
pass

def val_step(self):
pass


class ExampleDataset(Dataset):

def __init__(self):
self.index = 0
self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]

def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results

def __len__(self):
return len(self.eval_result)


def test_single_gpu_test():
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=1)
model = Model()

results = single_gpu_test(model, loader)
assert results == list(range(8))
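
# Editor's note: the counter-based mock works because of the loop
# single_gpu_test runs. A rough approximation of that loop (ignoring
# the progress bar; not the verbatim mmaction code):

import torch

def sketch_single_gpu_test(model, data_loader):
    model.eval()
    results = []
    for data in data_loader:
        with torch.no_grad():
            # The mock's forward returns [cnt] and increments cnt,
            # so eight batches yield list(range(8)).
            results.extend(model(return_loss=False, **data))
    return results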


def mock_tensor_without_cuda(*args, **kwargs):
if 'device' not in kwargs:
return torch.Tensor(*args)
return torch.IntTensor(*args, device='cpu')


@patch('mmaction.apis.test.collect_results_gpu',
Mock(return_value=list(range(8))))
@patch('mmaction.apis.test.collect_results_cpu',
Mock(return_value=list(range(8))))
def test_multi_gpu_test():
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=1)
model = Model()

results = multi_gpu_test(model, loader)
assert results == list(range(8))

results = multi_gpu_test(model, loader, gpu_collect=False)
assert results == list(range(8))
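
# Editor's note: with both collectors patched to return list(range(8)),
# this test only checks the dispatch on gpu_collect. An approximation of
# that dispatch (not the verbatim mmaction code):

from mmaction.apis.test import collect_results_cpu, collect_results_gpu

def sketch_collect_dispatch(part, dataset_size, tmpdir=None, gpu_collect=False):
    # `part` is this rank's result list, built by the same loop as in
    # the single-GPU sketch above.
    if gpu_collect:
        return collect_results_gpu(part, dataset_size)      # NCCL gather
    return collect_results_cpu(part, dataset_size, tmpdir)  # tmpdir gather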


@patch('mmcv.runner.get_dist_info', Mock(return_value=(0, 1)))
@patch('torch.distributed.broadcast', MagicMock)
@patch('torch.distributed.barrier', Mock)
@pytest.mark.skipif(
sys.version_info[:2] == (3, 8), reason='Not for python 3.8')
def test_collect_results_cpu():

def content_for_unittest():
results_part = list(range(8))
size = 8

results = collect_results_cpu(results_part, size)
assert results == list(range(8))

results = collect_results_cpu(results_part, size, 'unittest')
assert results == list(range(8))

if not torch.cuda.is_available():
with patch(
'torch.full',
Mock(
return_value=torch.full(
(512, ), 32, dtype=torch.uint8, device='cpu'))):
with patch('torch.tensor', mock_tensor_without_cuda):
content_for_unittest()
else:
content_for_unittest()
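
Editor's note: collect_results_cpu gathers partial results through a shared directory, and the real implementation broadcasts the tmpdir name between ranks as a uint8 tensor, which is why torch.full and torch.tensor are mocked on CUDA-less machines. A rough single-process sketch of the protocol (assumption: mirrors the mmcv-style helper, not a verbatim copy):

import os
import pickle
import tempfile

def sketch_collect_results_cpu(result_part, size, rank, world_size, tmpdir=None):
    tmpdir = tmpdir or tempfile.mkdtemp()
    # Every rank dumps its share; a dist.barrier() would sit here.
    with open(os.path.join(tmpdir, f'part_{rank}.pkl'), 'wb') as f:
        pickle.dump(result_part, f)
    if rank != 0:
        return None
    # Rank 0 reloads all shares, interleaves them back into dataset
    # order, and truncates padding added by the distributed sampler.
    parts = []
    for i in range(world_size):
        with open(os.path.join(tmpdir, f'part_{i}.pkl'), 'rb') as f:
            parts.append(pickle.load(f))
    ordered = [res for shard in zip(*parts) for res in shard]
    return ordered[:size]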
6 changes: 3 additions & 3 deletions tests/test_config.py → tests/test_runtime/test_config.py
@@ -10,7 +10,7 @@

def _get_config_path():
"""Find the predefined recognizer config path."""
-    repo_dir = osp.dirname(osp.dirname(__file__))
+    repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dir, 'configs')
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
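
Editor's note: the extra osp.dirname accounts for the test files moving one directory deeper (tests/ to tests/test_runtime/), so reaching the repo root now takes three hops:

import os.path as osp

# tests/test_runtime/test_config.py
#   -> tests/test_runtime   (1st dirname)
#   -> tests                (2nd dirname)
#   -> <repo root>          (3rd dirname)
repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))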
@@ -26,7 +26,7 @@ def _get_config_path():
def test_config_build_recognizer():
"""Test that all mmaction models defined in the configs can be
initialized."""
-    repo_dir = osp.dirname(osp.dirname(__file__))
+    repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dir, 'configs/recognition')
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
@@ -49,7 +49,7 @@ def test_config_build_recognizer():

def _get_config_path_for_localizer():
"""Find the predefined localizer config path for localizer."""
-    repo_dir = osp.dirname(osp.dirname(__file__))
+    repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
config_dpath = osp.join(repo_dir, 'configs/localization')
if not osp.exists(config_dpath):
raise Exception('Cannot find config path')
@@ -87,8 +87,6 @@ def val_step(self, x, optimizer, **kwargs):
return runner


-@pytest.mark.skipif(
-    not torch.cuda.is_available(), reason='requires CUDA support')
def test_eval_hook():
with pytest.raises(TypeError):
# `save_best` should be a boolean
File renamed without changes.
File renamed without changes.
1 change: 1 addition & 0 deletions tests/test_train.py → tests/test_runtime/test_train.py
@@ -71,6 +71,7 @@ def test_train_model():
workflow=[('train', 1)],
total_epochs=5,
evaluation=dict(interval=1, key_indicator='acc'),
+    omnisource=False,
data=dict(
videos_per_gpu=1,
workers_per_gpu=0,
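
Editor's note: the new omnisource=False entry suggests train_model now reads that flag from the config, with False keeping the ordinary single-source training path (OmniSource mixes in extra data sources). A minimal, hypothetical config fragment with the fields the test exercises:

cfg = dict(
    total_epochs=5,
    evaluation=dict(interval=1, key_indicator='acc'),
    omnisource=False,  # standard single-source training
    data=dict(videos_per_gpu=1, workers_per_gpu=0),
)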