Add a --benchmark-disable option.
ionelmc committed Oct 17, 2015
1 parent ea3c26d commit d6fef22
Showing 3 changed files with 81 additions and 30 deletions.
13 changes: 11 additions & 2 deletions CHANGELOG.rst
@@ -1,7 +1,15 @@

Changelog
=========

3.0.0b2 (2015-10-17)
---------------------

* Added a ``--benchmark-disable`` option (usage sketch below): benchmarked functions are run only once and no
  stats are reported.
* When xdist is on or ``statistics`` can't be imported, ``--benchmark-disable`` is automatically activated
  (instead of ``--benchmark-skip``). *BACKWARDS INCOMPATIBLE*
* Replaced the deprecated ``__multicall__`` with the new hookwrapper system.
* Improved description for ``--benchmark-max-time``.
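
For orientation, a minimal usage sketch (the test module below is hypothetical; only the
``--benchmark-disable`` flag itself comes from this release)::

    # test_sorting.py -- hypothetical example
    def test_sort(benchmark):
        # With --benchmark-disable the benchmarked callable is run exactly once:
        # no calibration, no rounds, no statistics in the report. Its return
        # value is still passed through, so the assertion below keeps working.
        result = benchmark(sorted, [3, 1, 2])
        assert result == [1, 2, 3]

    # Run the suite without any benchmarking overhead (e.g. in CI or under xdist):
    #   py.test --benchmark-disable test_sorting.py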

3.0.0b1 (2015-10-13)
--------------------

@@ -37,7 +45,8 @@ Changelog
* Added option to fine tune the calibration (the ``--benchmark-calibration-precision`` command line argument and
``calibration_precision`` marker option).

* Changed ``benchmark_weave`` to no longer be a context manager. Cleanup is performed automatically. *BACKWARDS INCOMPATIBLE*
* Changed ``benchmark_weave`` to no longer be a context manager. Cleanup is performed automatically. *BACKWARDS
INCOMPATIBLE*
* Added ``benchmark.weave`` method (alternative to ``benchmark_weave`` fixture).

* Added new hooks to allow customization:
71 changes: 45 additions & 26 deletions src/pytest_benchmark/plugin.py
@@ -125,7 +125,13 @@ def pytest_addoption(parser):
group.addoption(
"--benchmark-skip",
action="store_true", default=False,
help="Skip running any benchmarks."
help="Skip running any tests that contain benchmarks."
)
group.addoption(
"--benchmark-disable",
action="store_true", default=False,
help="Disable benchmarks. Benchmarked functions are run only once and no stats are reported. Use this if you "
"want to run the test without doing any benchmarking."
)
group.addoption(
"--benchmark-only",
@@ -238,9 +244,10 @@ def _get_precision(cls, timer):
return cls._precisions.setdefault(timer, compute_timer_precision(timer))

def __init__(self, node, disable_gc, timer, min_rounds, min_time, max_time, warmup, warmup_iterations,
calibration_precision, add_stats, logger, group=None):
calibration_precision, add_stats, logger, disable, group=None):
self.name = node.name
self.fullname = node._nodeid
self.disable = disable
self.param = node.callspec.id if hasattr(node, 'callspec') else None
self.group = group

@@ -294,28 +301,28 @@ def _make_stats(self, iterations):
return stats

def __call__(self, function_to_benchmark, *args, **kwargs):
runner = self._make_runner(function_to_benchmark, args, kwargs)
if not self.disable:
runner = self._make_runner(function_to_benchmark, args, kwargs)

duration, iterations, loops_range = self._calibrate_timer(runner)
duration, iterations, loops_range = self._calibrate_timer(runner)

# Choose how many times we must repeat the test
rounds = int(ceil(self._max_time / duration))
rounds = max(rounds, self._min_rounds)
rounds = min(rounds, sys.maxsize)

stats = self._make_stats(iterations)
# Choose how many times we must repeat the test
rounds = int(ceil(self._max_time / duration))
rounds = max(rounds, self._min_rounds)
rounds = min(rounds, sys.maxsize)

self._logger.debug(" Running %s rounds x %s iterations ..." % (rounds, iterations), yellow=True, bold=True)
run_start = time.time()
if self._warmup:
warmup_rounds = min(rounds, max(1, int(self._warmup / iterations)))
self._logger.debug(" Warmup %s rounds x %s iterations ..." % (warmup_rounds, iterations))
for _ in XRANGE(warmup_rounds):
runner(loops_range)
for _ in XRANGE(rounds):
stats.update(runner(loops_range))
self._logger.debug(" Ran for %ss." % format_time(time.time() - run_start), yellow=True, bold=True)
stats = self._make_stats(iterations)

self._logger.debug(" Running %s rounds x %s iterations ..." % (rounds, iterations), yellow=True, bold=True)
run_start = time.time()
if self._warmup:
warmup_rounds = min(rounds, max(1, int(self._warmup / iterations)))
self._logger.debug(" Warmup %s rounds x %s iterations ..." % (warmup_rounds, iterations))
for _ in XRANGE(warmup_rounds):
runner(loops_range)
for _ in XRANGE(rounds):
stats.update(runner(loops_range))
self._logger.debug(" Ran for %ss." % format_time(time.time() - run_start), yellow=True, bold=True)
return function_to_benchmark(*args, **kwargs)
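
# NOTE (illustrative annotation, not part of this change): the round count above is
# the time budget divided by the calibrated duration of one round, clamped between
# --benchmark-min-rounds and sys.maxsize. A hedged worked example with made-up numbers:
#     max_time, duration, min_rounds = 1.0, 0.02, 5
#     rounds = int(ceil(max_time / duration))   # 50 rounds fit the 1 second budget
#     rounds = max(rounds, min_rounds)          # never fewer than --benchmark-min-rounds
#     rounds = min(rounds, sys.maxsize)         # cap absurdly large counts for tiny durations
#     # => rounds == 50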

def pedantic(self, target, args=(), kwargs=None, setup=None, rounds=1, warmup_rounds=0, iterations=1):
@@ -345,6 +352,10 @@ def make_arguments(args=args, kwargs=kwargs):
args, kwargs = maybe_args
return args, kwargs

if self.disable:
args, kwargs = make_arguments()
return target(*args, **kwargs)

stats = self._make_stats(iterations)
loops_range = XRANGE(iterations) if iterations > 1 else None
for _ in XRANGE(warmup_rounds):
@@ -487,24 +498,31 @@ def __init__(self, config):
warmup_iterations=config.getoption("benchmark_warmup_iterations"),
)
self.skip = config.getoption("benchmark_skip")
self.disable = config.getoption("benchmark_disable")

if config.getoption("dist", "no") != "no" and not self.skip:
self.logger.warn(
"Benchmarks are automatically skipped because xdist plugin is active."
"Benchmarks are automatically disabled because xdist plugin is active. "
"Benchmarks cannot be performed reliably in a parallelized environment.",
)
self.skip = True
self.disable = True
if hasattr(config, "slaveinput"):
self.skip = True
self.disable = True
if not statistics:
self.logger.warn(
"Benchmarks are automatically skipped because we could not import `statistics`\n\n%s" % statistics_error
"Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
statistics_error
)
self.skip = True
self.disable = True

self.only = config.getoption("benchmark_only")
self.sort = config.getoption("benchmark_sort")
if self.skip and self.only:
raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
if self.disable and self.only:
raise pytest.UsageError(
"Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
"automatically activated if xdist is on or you're missing the statistics dependency.")
self._benchmarks = []
self.group_by = config.getoption("benchmark_group_by")
self.save = config.getoption("benchmark_save")
@@ -945,7 +963,7 @@ def benchmark(request):
bs = request.config._benchmarksession

if bs.skip:
pytest.skip("Benchmarks are disabled.")
pytest.skip("Benchmarks are skipped (--benchmark-skip was used).")
else:
node = request.node
marker = node.get_marker("benchmark")
@@ -956,6 +974,7 @@
node,
add_stats=bs._benchmarks.append,
logger=bs.logger,
disable=bs.disable,
**dict(bs.options, **options)
)
request.addfinalizer(fixture._cleanup)
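
Taken together, the plugin changes above wire the new flag through two layers: the session decides whether
benchmarking is disabled (explicit ``--benchmark-disable``, xdist being active, or a missing ``statistics``
module), and the fixture short-circuits all measurement when it is. A simplified, illustrative sketch of that
flow, using toy classes rather than the plugin's real API:

    # Toy model of the control flow above -- illustrative only, not the real classes.
    class ToySession:
        def __init__(self, disable=False, xdist_active=False, have_statistics=True):
            self.disable = disable
            # Since 3.0.0b2, these conditions disable benchmarking (the tests
            # themselves still run) instead of skipping the tests outright.
            if xdist_active or not have_statistics:
                self.disable = True

    class ToyBenchmark:
        def __init__(self, disable):
            self.disable = disable

        def __call__(self, func, *args, **kwargs):
            if self.disable:
                # No calibration, warmup, rounds or stats -- one plain call,
                # and the result is handed back to the test body.
                return func(*args, **kwargs)
            raise NotImplementedError("measurement path omitted in this sketch")

    session = ToySession(xdist_active=True)        # e.g. running under pytest-xdist
    bench = ToyBenchmark(disable=session.disable)
    assert bench(sorted, [3, 1, 2]) == [1, 2, 3]   # the test body still executes once
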
27 changes: 25 additions & 2 deletions tests/test_benchmark.py
@@ -45,7 +45,7 @@ def test_help(testdir):
" --benchmark-verbose Dump diagnostic and progress information.",
" --benchmark-disable-gc",
" Disable GC during benchmarks.",
" --benchmark-skip Skip running any benchmarks.",
" --benchmark-skip Skip running any tests that contain benchmarks.",
" --benchmark-only Only run benchmarks.",
" --benchmark-save=NAME",
" Save the current run into 'STORAGE-PATH/counter-",
@@ -325,6 +325,15 @@ def test_conflict_between_only_and_skip(testdir):
])


def test_conflict_between_only_and_disable(testdir):
test = testdir.makepyfile(SIMPLE_TEST)
result = testdir.runpytest('--benchmark-only', '--benchmark-disable', test)
result.stderr.fnmatch_lines([
"ERROR: Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
"automatically activated if xdist is on or you're missing the statistics dependency."
])


def test_max_time_min_rounds(testdir):
test = testdir.makepyfile(SIMPLE_TEST)
result = testdir.runpytest('--doctest-modules', '--benchmark-max-time=0.000001', '--benchmark-min-rounds=1', test)
@@ -647,7 +656,7 @@ def test_xdist(testdir):
result = testdir.runpytest('--doctest-modules', '-n', '1', test)
result.stderr.fnmatch_lines([
"------*",
" WARNING: Benchmarks are automatically skipped because xdist plugin is active.Benchmarks cannot be performed "
" WARNING: Benchmarks are automatically disabled because xdist plugin is active. Benchmarks cannot be performed "
"reliably in a parallelized environment.",
"------*",
])
@@ -855,6 +864,20 @@ def test_skip(testdir):
])


def test_disable(testdir):
test = testdir.makepyfile(BASIC_TEST)
result = testdir.runpytest('-vv', '--doctest-modules', '--benchmark-disable', test)
result.stdout.fnmatch_lines([
"*collected 5 items",
"test_disable.py::*test_disable PASSED",
"test_disable.py::test_slow PASSED",
"test_disable.py::test_slower PASSED",
"test_disable.py::test_xfast PASSED",
"test_disable.py::test_fast PASSED",
"*====== 5 passed * seconds ======*",
])


def test_mark_selection(testdir):
test = testdir.makepyfile(BASIC_TEST)
result = testdir.runpytest('-vv', '--doctest-modules', '-m', 'benchmark', test)
