
Commit

Merge 4f9fb13 into e21d193
Artimi authored Oct 6, 2016
2 parents e21d193 + 4f9fb13 commit e2acd74
Showing 11 changed files with 150 additions and 12 deletions.
1 change: 1 addition & 0 deletions AUTHORS.rst
@@ -8,3 +8,4 @@ Authors
* Stefan Krastanov - http://blog.krastanov.org/
* Thomas Waldmann - https://github.com/ThomasWaldmann
* Antonio Cuni - http://antocuni.eu/en/
* Petr Šebek - https://github.com/Artimi
1 change: 1 addition & 0 deletions CHANGELOG.rst
@@ -26,6 +26,7 @@ Changelog
* ``--group-by=LABEL``
* ``--columns=LABELS``
* ``--histogram=[FILENAME-PREFIX]``
* Added ``--benchmark-cprofile`` that profiles the last run of the benchmarked function. Contributed by Petr Šebek.


3.0.0 (2015-11-08)
6 changes: 6 additions & 0 deletions docs/usage.rst
@@ -134,6 +134,12 @@ Commandline options
--benchmark-save or --benchmark-autosave are used).
Default: './.benchmarks/<os>-<pyimplementation>-<pyversion>-<arch>bit',
example: 'Linux-CPython-2.7-64bit'.
--benchmark-cprofile=COLUMN
If specified measure one run with cProfile and stores
10 top functions. Argument is a column to sort by.
Available columns: 'ncalls_recursion', 'ncalls',
'tottime', 'tottime_per', 'cumtime', 'cumtime_per',
'function_name'.
--benchmark-histogram=FILENAME-PREFIX
Plot graphs of min/max/avg/stddev over time in
FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX
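As a quick illustration of the option documented above (a hedged sketch — the test file and the fib function are invented for this example and are not part of this changeset), a benchmark such as

    # test_fib.py -- hypothetical example
    def test_fib(benchmark):
        def fib(n):
            # deliberately recursive, so the profile reports calls as 'total/primitive'
            return n if n < 2 else fib(n - 1) + fib(n - 2)
        benchmark(fib, 15)

run with ``py.test --benchmark-cprofile=cumtime`` prints, after the usual timing table, a "cProfile information" section listing the top functions of one profiled run, sorted by cumulative time.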
26 changes: 21 additions & 5 deletions src/pytest_benchmark/fixture.py
@@ -1,6 +1,8 @@
from __future__ import division
from __future__ import print_function

import cProfile
import pstats
import gc
import sys
import time
@@ -19,7 +21,7 @@
statistics = None
else:
statistics_error = None
from .stats import BenchmarkStats
from .stats import Metadata


class FixtureAlreadyUsed(Exception):
@@ -37,7 +39,7 @@ def _get_precision(cls, timer):
return cls._precisions.setdefault(timer, compute_timer_precision(timer))

def __init__(self, node, disable_gc, timer, min_rounds, min_time, max_time, warmup, warmup_iterations,
calibration_precision, add_stats, logger, warner, disabled, group=None):
calibration_precision, add_stats, logger, warner, disabled, use_cprofile, group=None):
self.name = node.name
self.fullname = node._nodeid
self.disabled = disabled
@@ -62,6 +64,8 @@ def __init__(self, node, disable_gc, timer, min_rounds, min_time, max_time, warm
self._warner = warner
self._cleanup_callbacks = []
self._mode = None
self.use_cprofile = use_cprofile
self.cprofile_stats = None

@property
def enabled(self):
@@ -94,7 +98,7 @@ def runner(loops_range, timer=self._timer):
return runner

def _make_stats(self, iterations):
bench_stats = BenchmarkStats(self, iterations=iterations, options={
bench_stats = Metadata(self, iterations=iterations, options={
"disable_gc": self._disable_gc,
"timer": self._timer,
"min_rounds": self._min_rounds,
@@ -103,7 +107,7 @@ def _make_stats(self, iterations):
"warmup": self._warmup,
})
self._add_stats(bench_stats)
self.stats = bench_stats.stats
self.stats = bench_stats
return bench_stats

def __call__(self, function_to_benchmark, *args, **kwargs):
@@ -154,7 +158,13 @@ def _raw(self, function_to_benchmark, *args, **kwargs):
for _ in XRANGE(rounds):
stats.update(runner(loops_range))
self._logger.debug(" Ran for %ss." % format_time(time.time() - run_start), yellow=True, bold=True)
return function_to_benchmark(*args, **kwargs)
if self.use_cprofile:
profile = cProfile.Profile()
function_result = profile.runcall(function_to_benchmark, *args, **kwargs)
self.stats.cprofile_stats = pstats.Stats(profile)
else:
function_result = function_to_benchmark(*args, **kwargs)
return function_result

def _raw_pedantic(self, target, args=(), kwargs=None, setup=None, rounds=1, warmup_rounds=0, iterations=1):
if kwargs is None:
@@ -208,6 +218,12 @@ def make_arguments(args=args, kwargs=kwargs):
if loops_range:
args, kwargs = make_arguments()
result = target(*args, **kwargs)

if self.use_cprofile:
profile = cProfile.Profile()
profile.runcall(target, *args, **kwargs)
self.stats.cprofile_stats = pstats.Stats(profile)

return result

def weave(self, target, **kwargs):
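The heart of the fixture change is the pairing of ``cProfile.Profile.runcall`` with ``pstats.Stats``: the benchmarked function gets one extra, untimed call under the profiler, and the resulting stats object is attached to the benchmark metadata. A minimal standalone sketch of that pattern, independent of the fixture (the ``workload`` function is a made-up placeholder):

    import cProfile
    import pstats
    import time

    def workload():
        time.sleep(0.01)

    profiler = cProfile.Profile()
    result = profiler.runcall(workload)    # profiled call; the return value is passed through
    stats = pstats.Stats(profiler)         # wrap the raw profiler data for sorting/printing
    stats.sort_stats('cumulative').print_stats(10)

Because this profiled call happens after the timed rounds, cProfile's overhead does not leak into the reported benchmark statistics.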
8 changes: 8 additions & 0 deletions src/pytest_benchmark/plugin.py
@@ -215,6 +215,14 @@ def pytest_addoption(parser):
help="Fail test if performance regresses according to given EXPR"
" (eg: min:5%% or mean:0.001 for number of seconds). Can be used multiple times."
)
group.addoption(
"--benchmark-cprofile",
metavar="COLUMN", default=None, choices=['ncalls_recursion', 'ncalls', 'tottime',
'tottime_per', 'cumtime', 'cumtime_per', 'function_name'],
help="If specified measure one run with cProfile and stores 10 top functions."
" Argument is a column to sort by. Available columns: 'ncalls_recursion',"
" 'ncalls', 'tottime', 'tottime_per', 'cumtime', 'cumtime_per', 'function_name'."
)
add_global_options(group.addoption)
add_display_options(group.addoption)
add_histogram_options(group.addoption)
25 changes: 23 additions & 2 deletions src/pytest_benchmark/session.py
@@ -43,9 +43,11 @@ def __init__(self, config):
disable_gc=config.getoption("benchmark_disable_gc"),
warmup=config.getoption("benchmark_warmup"),
warmup_iterations=config.getoption("benchmark_warmup_iterations"),
use_cprofile=bool(config.getoption("benchmark_cprofile")),
)
self.skip = config.getoption("benchmark_skip")
self.disabled = config.getoption("benchmark_disable") and not config.getoption("benchmark_enable")
self.cprofile_sort_by = config.getoption("benchmark_cprofile")

if config.getoption("dist", "no") != "no" and not self.skip:
self.logger.warn(
@@ -105,7 +107,7 @@ def prepare_benchmarks(self):
if bench.fullname in compared_mapping:
compared = compared_mapping[bench.fullname]
source = short_filename(path, self.machine_id)
flat_bench = bench.as_dict(include_data=False, stats=False)
flat_bench = bench.as_dict(include_data=False, stats=False, cprofile=self.cprofile_sort_by)
flat_bench.update(compared["stats"])
flat_bench["path"] = str(path)
flat_bench["source"] = source
@@ -115,7 +117,7 @@
if fail:
self.performance_regressions.append((self.name_format(flat_bench), fail))
yield flat_bench
flat_bench = bench.as_dict(include_data=False, flat=True)
flat_bench = bench.as_dict(include_data=False, flat=True, cprofile=self.cprofile_sort_by)
flat_bench["path"] = None
flat_bench["source"] = compared and "NOW"
yield flat_bench
@@ -230,6 +232,7 @@ def display(self, tr):
)
results_table.display(tr, self.groups)
self.check_regressions()
self.display_cprofile(tr)

def check_regressions(self):
if self.compare_fail and not self.compared_mapping:
@@ -240,3 +243,21 @@
"\t%s - %s" % line for line in self.performance_regressions
))
raise PerformanceRegression("Performance has regressed.")

def display_cprofile(self, tr):
if self.options["use_cprofile"]:
tr.section("cProfile information")
tr.write_line("Time in s")
for group in self.groups:
group_name, benchmarks = group
for benchmark in benchmarks:
tr.write(benchmark["fullname"], yellow=True)
if benchmark["source"]:
tr.write_line(" ({})".format((benchmark["source"])))
else:
tr.write("\n")
tr.write_line("ncalls\ttottime\tpercall\tcumtime\tpercall\tfilename:lineno(function)")
for function_info in benchmark["cprofile"]:
line = "{ncalls_recursion}\t{tottime:.{prec}f}\t{tottime_per:.{prec}f}\t{cumtime:.{prec}f}\t{cumtime_per:.{prec}f}\t{function_name}".format(prec=4, **function_info)
tr.write_line(line)
tr.write("\n")
24 changes: 22 additions & 2 deletions src/pytest_benchmark/stats.py
@@ -4,9 +4,11 @@
import statistics
from bisect import bisect_left
from bisect import bisect_right
import operator

from .utils import cached_property
from .utils import funcname
from .utils import get_cprofile_functions


class Stats(object):
@@ -161,13 +163,14 @@ def outliers(self):
return "%s;%s" % (self.stddev_outliers, self.iqr_outliers)


class BenchmarkStats(object):
class Metadata(object):
def __init__(self, fixture, iterations, options):
self.name = fixture.name
self.fullname = fixture.fullname
self.group = fixture.group
self.param = fixture.param
self.params = fixture.params
self.cprofile_stats = fixture.cprofile_stats

self.iterations = iterations
self.stats = Stats()
@@ -196,7 +199,7 @@ def __getitem__(self, key):
def has_error(self):
return self.fixture.has_error

def as_dict(self, include_data=True, flat=False, stats=True):
def as_dict(self, include_data=True, flat=False, stats=True, cprofile=None):
result = {
"group": self.group,
"name": self.name,
@@ -207,6 +210,23 @@
(k, funcname(v) if callable(v) else v) for k, v in self.options.items()
)
}
if self.cprofile_stats:
cprofile_list = result["cprofile"] = []
cprofile_functions = get_cprofile_functions(self.cprofile_stats)
stats_columns = ["cumtime", "tottime", "ncalls", "ncalls_recursion",
"tottime_per", "cumtime_per", "function_name"]
# move the requested sort column to the front so its top functions lead the list
if cprofile is not None:
stats_columns.remove(cprofile)
stats_columns.insert(0, cprofile)
for column in stats_columns:
cprofile_functions.sort(key=operator.itemgetter(column), reverse=True)
for cprofile_function in cprofile_functions[:25]:
if cprofile_function not in cprofile_list:
cprofile_list.append(cprofile_function)
# if we want only one column or we already have all available functions
if cprofile is None or len(cprofile_functions) == len(cprofile_list):
break
if stats:
stats = self.stats.as_dict()
if include_data:
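The selection above leans on sorting a list of per-function dicts with ``operator.itemgetter`` and keeping only the leading entries for each column. A tiny sketch of that idiom in isolation (the dict values are invented sample numbers, not real profile data):

    import operator

    functions = [
        {"function_name": "a", "cumtime": 0.50},
        {"function_name": "b", "cumtime": 0.20},
        {"function_name": "c", "cumtime": 0.90},
    ]
    # sort descending by the chosen column, then keep the top entries
    functions.sort(key=operator.itemgetter("cumtime"), reverse=True)
    assert [f["function_name"] for f in functions[:2]] == ["c", "a"]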
31 changes: 31 additions & 0 deletions src/pytest_benchmark/utils.py
@@ -452,3 +452,34 @@ def commonpath(paths):
except (TypeError, AttributeError):
genericpath._check_arg_types('commonpath', *paths)
raise


def get_cprofile_functions(stats):
"""
Convert a pstats structure into a list of dicts, one per profiled function.
"""
result = []
# this assumes that you run py.test from project root dir
project_dir_parent = os.path.dirname(os.getcwd())

for function_info, run_info in stats.stats.items():
file_path = function_info[0]
if file_path.startswith(project_dir_parent):
file_path = file_path[len(project_dir_parent):].lstrip('/')
function_name = '{0}:{1}({2})'.format(file_path, function_info[1], function_info[2])

# for recursive functions, report the call count as 'total calls/primitive calls'
if run_info[0] == run_info[1]:
calls = str(run_info[0])
else:
calls = '{1}/{0}'.format(run_info[0], run_info[1])

result.append(dict(ncalls_recursion=calls,
ncalls=run_info[1],
tottime=run_info[2],
tottime_per=run_info[2] / run_info[0] if run_info[0] > 0 else 0,
cumtime=run_info[3],
cumtime_per=run_info[3] / run_info[0] if run_info[0] > 0 else 0,
function_name=function_name))

return result
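A short usage sketch for the helper above, outside of the plugin (assuming the installed ``pytest_benchmark.utils`` module; the profiled ``busy`` function is a made-up example):

    import cProfile
    import pstats

    from pytest_benchmark.utils import get_cprofile_functions

    def busy():
        return sum(i * i for i in range(10000))

    profiler = cProfile.Profile()
    profiler.runcall(busy)
    # one flat dict per profiled function, ready for itemgetter sorting and display
    for info in get_cprofile_functions(pstats.Stats(profiler)):
        print(info["ncalls"], round(info["cumtime"], 4), info["function_name"])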
26 changes: 26 additions & 0 deletions tests/test_benchmark.py
@@ -63,6 +63,12 @@ def test_help(testdir):
" Fail test if performance regresses according to given",
" EXPR (eg: min:5% or mean:0.001 for number of seconds).",
" Can be used multiple times.",
" --benchmark-cprofile=COLUMN",
" If specified measure one run with cProfile and stores",
" 10 top functions. Argument is a column to sort by.",
" Available columns: 'ncallls_recursion', 'ncalls',",
" 'tottime', 'tottime_per', 'cumtime', 'cumtime_per',",
" 'function_name'.",
" --benchmark-storage=STORAGE-PATH",
" Specify a different path to store the runs (when",
" --benchmark-save or --benchmark-autosave are used).",
@@ -766,6 +772,26 @@ def test_xdist_verbose(testdir):
])


def test_cprofile(testdir):
test = testdir.makepyfile(SIMPLE_TEST)
result = testdir.runpytest('--benchmark-cprofile=cumtime', test)
result.stdout.fnmatch_lines([
"============================= cProfile information =============================",
"Time in s",
"test_cprofile.py::test_fast",
"ncalls tottime percall cumtime percall filename:lineno(function)",
# "1 0.0000 0.0000 0.0001 0.0001 test_cprofile0/test_cprofile.py:9(result)",
# "1 0.0001 0.0001 0.0001 0.0001 ~:0(<built-in method time.sleep>)",
# "1 0.0000 0.0000 0.0000 0.0000 ~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
"",
"test_cprofile.py::test_slow",
"ncalls tottime percall cumtime percall filename:lineno(function)",
# "1 0.0000 0.0000 0.1002 0.1002 test_cprofile0/test_cprofile.py:15(<lambda>)",
# "1 0.1002 0.1002 0.1002 0.1002 ~:0(<built-in method time.sleep>)",
# "1 0.0000 0.0000 0.0000 0.0000 ~:0(<method 'disable' of '_lsprof.Profiler' objects>)",
])


def test_abort_broken(testdir):
"""
Test that we don't benchmark code that raises exceptions.
4 changes: 2 additions & 2 deletions tests/test_normal.py
@@ -18,7 +18,7 @@ def result():
assert result is None

if not benchmark.disabled:
assert benchmark.stats.min >= 0.000001
assert benchmark.stats.stats.min >= 0.000001


def test_slow(benchmark):
@@ -42,4 +42,4 @@ def foo(request):
def test_parametrized(benchmark, foo):
benchmark(time.sleep, 0.00001)
if benchmark.enabled:
assert benchmark.stats.min >= 0.00001
assert benchmark.stats.stats.min >= 0.00001
10 changes: 9 additions & 1 deletion tests/test_storage.py
@@ -42,6 +42,12 @@ def __init__(self, **kwargs):
def __getitem__(self, item):
return self.__dict__[item]

def getoption(self, item, default=None):
try:
return self[item]
except KeyError:
return default


class LooseFileLike(BytesIO):
def close(self):
@@ -67,7 +73,9 @@ def __init__(self, name_format):
'min_rounds': 123,
'min_time': 234,
'max_time': 345,
'use_cprofile': False,
}
self.cprofile_sort_by = 'cumtime'
self.compare_fail = []
self.config = Namespace(hook=Namespace(
pytest_benchmark_group_stats=pytest_benchmark_group_stats,
@@ -87,7 +95,7 @@
data = json.load(fh)
self.benchmarks.extend(
Namespace(
as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench:
as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench, cprofile='cumtime':
dict(_bench, **_bench["stats"]) if flat else dict(_bench),
name=bench['name'],
fullname=bench['fullname'],
