Skip to content

Commit

Permalink
Merge pull request #37 from antocuni/save-params
Browse files Browse the repository at this point in the history
Improve the handling of multiple parametrized tests
  • Loading branch information
ionelmc committed Dec 30, 2015
2 parents 3e0f17f + df09b90 commit aa1f5a7
Show file tree
Hide file tree
Showing 5 changed files with 83 additions and 15 deletions.
25 changes: 21 additions & 4 deletions src/pytest_benchmark/plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,12 +106,14 @@ def pytest_addoption(parser):
group.addoption(
"--benchmark-sort",
metavar="COL", type=parse_sort, default="min",
help="Column to sort on. Can be one of: 'min', 'max', 'mean' or 'stddev'. Default: %(default)r"
help="Column to sort on. Can be one of: 'min', 'max', 'mean', 'stddev', "
"'name', 'fullname'. Default: %(default)r"
)
group.addoption(
"--benchmark-group-by",
metavar="LABEL", default="group",
help="How to group tests. Can be one of: 'group', 'name', 'fullname', 'func', 'fullfunc' or 'param'."
help="How to group tests. Can be one of: 'group', 'name', 'fullname', 'func', 'fullfunc', "
"'param' or 'param:NAME', where NAME is the name passed to @pytest.parametrize."
" Default: %(default)r"
)
group.addoption(
Expand Down Expand Up @@ -237,6 +239,7 @@ def __init__(self, fixture, iterations, options):
self.fullname = fixture.fullname
self.group = fixture.group
self.param = fixture.param
self.params = fixture.params

self.iterations = iterations
self.stats = Stats()
Expand Down Expand Up @@ -290,7 +293,12 @@ def __init__(self, node, disable_gc, timer, min_rounds, min_time, max_time, warm
self.name = node.name
self.fullname = node._nodeid
self.disable = disable
self.param = node.callspec.id if hasattr(node, 'callspec') else None
if hasattr(node, 'callspec'):
self.param = node.callspec.id
self.params = node.callspec.params
else:
self.param = None
self.params = None
self.group = group
self.has_error = False

Expand Down Expand Up @@ -857,7 +865,10 @@ def display_results_table(self, tr):
worst[prop] = max(benchmark[prop] for _, benchmark in report_progress(
benchmarks, tr, "{line} ({pos}/{total})", line=line))

unit, adjustment = time_unit(best.get(self.sort, benchmarks[0][self.sort]))
time_unit_key = self.sort
if self.sort in ("name", "fullname"):
time_unit_key = "min"
unit, adjustment = time_unit(best.get(self.sort, benchmarks[0][time_unit_key]))
labels = {
"name": "Name (time in %ss)" % unit,
"min": "Min",
Expand Down Expand Up @@ -990,8 +1001,13 @@ def pytest_benchmark_group_stats(config, benchmarks, group_by):
groups[bench.fullname].append(bench)
elif group_by == "param":
groups[bench.param].append(bench)
elif group_by.startswith("param:"):
param_name = group_by[len("param:"):]
param_value = bench.params[param_name]
groups[param_value].append(bench)
else:
raise NotImplementedError("Unsupported grouping %r." % group_by)
#
for grouped_benchmarks in groups.values():
grouped_benchmarks.sort(key=operator.attrgetter("fullname" if "full" in group_by else "name"))
return sorted(groups.items(), key=lambda pair: pair[0] or "")
Expand Down Expand Up @@ -1053,6 +1069,7 @@ def pytest_benchmark_generate_json(config, benchmarks, include_data):
"group": bench.group,
"name": bench.name,
"fullname": bench.fullname,
"params": bench.params,
"stats": dict(bench.json(include_data=include_data), iterations=bench.iterations),
"options": dict(
(k, v.__name__ if callable(v) else v) for k, v in bench.options.items()
Expand Down
5 changes: 3 additions & 2 deletions src/pytest_benchmark/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -176,10 +176,11 @@ def parse_timer(string):

def parse_sort(string):
    """Validate and normalize a ``--benchmark-sort`` option value.

    The raw command-line string is lowercased and stripped of surrounding
    whitespace so users may pass e.g. ``MIN`` or ``" mean "``.

    :param string: raw value supplied on the command line.
    :return: the normalized column name.
    :raises argparse.ArgumentTypeError: if the value is not one of the
        supported sort columns ('min', 'max', 'mean', 'stddev', 'name',
        'fullname').  Raising this type makes argparse report a proper
        usage error instead of a traceback.
    """
    string = string.lower().strip()
    # NOTE: the diff in this commit widened the accepted set to include the
    # non-numeric 'name'/'fullname' columns; keep the error message in sync
    # with this tuple whenever it changes.
    if string not in ("min", "max", "mean", "stddev", "name", "fullname"):
        raise argparse.ArgumentTypeError(
            "Unacceptable value: %r. "
            "Value for --benchmark-sort must be one of: 'min', 'max', 'mean', "
            "'stddev', 'name', 'fullname'." % string)
    return string


Expand Down
62 changes: 55 additions & 7 deletions tests/test_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,12 +22,13 @@ def test_help(testdir):
" --benchmark-min-rounds=NUM",
" Minimum rounds, even if total time would exceed",
" `--max-time`. Default: 5",
" --benchmark-sort=COL Column to sort on. Can be one of: 'min', 'max', 'mean'",
" or 'stddev'. Default: 'min'",
" --benchmark-sort=COL Column to sort on. Can be one of: 'min', 'max',",
" 'mean', 'stddev', 'name', 'fullname'. Default: 'min'",
" --benchmark-group-by=LABEL",
" How to group tests. Can be one of: 'group', 'name',",
" 'fullname', 'func', 'fullfunc' or 'param'. Default:",
" 'group'",
" 'fullname', 'func', 'fullfunc', 'param' or",
" 'param:NAME', where NAME is the name passed to",
" @pytest.parametrize. Default: 'group'",
" --benchmark-timer=FUNC",
" Timer to use when measuring time. Default:*",
" --benchmark-calibration-precision=NUM",
Expand Down Expand Up @@ -160,6 +161,23 @@ def test_b(benchmark, foo):
benchmark(int)
'''

GROUPING_PARAMS_TEST = '''
import pytest
@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="A")
def test_a(benchmark, foo, bar):
benchmark(str)
@pytest.mark.parametrize("bar", ["bar1", "bar2"])
@pytest.mark.parametrize("foo", ["foo1", "foo2"])
@pytest.mark.benchmark(group="B")
def test_b(benchmark, foo, bar):
benchmark(int)
'''


def test_group_by_name(testdir):
test_x = testdir.makepyfile(test_x=GROUPING_TEST)
Expand Down Expand Up @@ -264,7 +282,7 @@ def test_group_by_fullfunc(testdir):
])


def test_group_by_param(testdir):
def test_group_by_param_all(testdir):
test_x = testdir.makepyfile(test_x=GROUPING_TEST)
test_y = testdir.makepyfile(test_y=GROUPING_TEST)
result = testdir.runpytest('--benchmark-max-time=0.0000001', '--benchmark-group-by', 'param', test_x, test_y)
Expand Down Expand Up @@ -293,6 +311,37 @@ def test_group_by_param(testdir):
'============* 8 passed* seconds ============*',
])

def test_group_by_param_select(testdir):
    """Benchmarks group by a single named parameter via ``param:NAME``.

    Runs the GROUPING_PARAMS_TEST module with ``--benchmark-group-by
    param:foo`` and asserts that the report contains one table per value of
    the ``foo`` parameter (``foo1``/``foo2``), each holding the four tests
    (2 functions x 2 ``bar`` values) for that value.  ``--benchmark-sort
    fullname`` makes the row order deterministic for the fnmatch patterns.
    """
    test_x = testdir.makepyfile(test_x=GROUPING_PARAMS_TEST)
    result = testdir.runpytest('--benchmark-max-time=0.0000001',
                               '--benchmark-group-by', 'param:foo',
                               '--benchmark-sort', 'fullname',
                               test_x)
    # '[' and ']' are fnmatch character-class metacharacters, so literal
    # brackets in test ids are escaped as '[[]' and '[]]' below.
    result.stdout.fnmatch_lines([
        '*', '*', '*', '*', '*',
        "* benchmark 'foo1': 4 tests *",
        'Name (time in ?s) *',
        '-------------------*',
        'test_a[[]foo1-bar1[]] *',
        'test_a[[]foo1-bar2[]] *',
        'test_b[[]foo1-bar1[]] *',
        'test_b[[]foo1-bar2[]] *',
        '-------------------*',
        '',
        "* benchmark 'foo2': 4 tests *",
        'Name (time in ?s) *',
        '------------------*',
        'test_a[[]foo2-bar1[]] *',
        'test_a[[]foo2-bar2[]] *',
        'test_b[[]foo2-bar1[]] *',
        'test_b[[]foo2-bar2[]] *',
        '------------------*',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd '
        'Quartile.',
        '============* 8 passed* seconds ============*',
    ])


def test_group_by_fullname(testdir):
test_x = testdir.makepyfile(test_x=GROUPING_TEST)
Expand Down Expand Up @@ -676,8 +725,7 @@ def test_bogus_sort(testdir):
result = testdir.runpytest('--benchmark-sort=bogus', test)
result.stderr.fnmatch_lines([
"usage: py* [[]options[]] [[]file_or_dir[]] [[]file_or_dir[]] [[]...[]]",
"py*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one of: 'min', 'max', 'mean' or 'stddev'."

"py*: error: argument --benchmark-sort: Unacceptable value: 'bogus'. Value for --benchmark-sort must be one of: 'min', 'max', 'mean', 'stddev', 'name', 'fullname'."
])


Expand Down
1 change: 1 addition & 0 deletions tests/test_storage.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ def __init__(self):
group=bench['group'],
options=bench['options'],
has_error=False,
params=None,
**bench['stats']
)
for bench in data['benchmarks']
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@
"benchmarks": [
{
"name": "test_xfast_parametrized[0]",
"group": null,
"group": null,
"params": null,
"stats": {
"rounds": 9710,
"iqr_outliers": 1726,
Expand Down Expand Up @@ -47,4 +48,4 @@
"dirty": true
},
"version": "2.5.0"
}
}

0 comments on commit aa1f5a7

Please sign in to comment.