Commit a6285ff

Add compared values in the result table. Also change how commit_info is saved.
ionelmc committed Aug 9, 2015
1 parent e5515aa commit a6285ff
Showing 2 changed files with 68 additions and 15 deletions.
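
The first change adds a comparison row under each benchmark in the result table. As a rough illustration of the delta formatting performed by the new display_compare_row method, here is a minimal standalone sketch; the stat values and the adjustment factor are invented for the example, and the real method writes colored cells through pytest's terminal reporter rather than print.

# Standalone sketch of the delta formatting done by the new display_compare_row.
# The stat values and the unit adjustment below are invented for illustration;
# the real method reads them from the current run and the loaded comparison
# file, and writes colored cells through pytest's terminal reporter.
current = {"min": 0.0012, "max": 0.0031, "mean": 0.0015, "stddev": 0.0002, "iqr": 0.0003}
compared = {"min": 0.0010, "max": 0.0031, "mean": 0.0018, "stddev": 0.0002, "iqr": 0.0004}
adjustment = 1000  # same scaling the table already applies (e.g. seconds to milliseconds)

for prop in ("min", "max", "mean", "stddev", "iqr"):
    val = current[prop] - compared[prop]
    if val > 0:
        cell = "+" + "{0:,.4f}".format(abs(val * adjustment))  # slower than the compared run (red)
    elif val < 0:
        cell = "-" + "{0:,.4f}".format(abs(val * adjustment))  # faster than the compared run (green)
    else:
        cell = "NC"                                             # no change
    print("{0:>6} {1}".format(prop, cell))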
67 changes: 56 additions & 11 deletions src/pytest_benchmark/plugin.py
@@ -6,8 +6,8 @@
import json
import math
import platform
import re
import sys
import traceback
import time
from collections import defaultdict
from datetime import datetime
@@ -25,6 +25,7 @@
from .utils import SecondsDecimal
from .utils import first_or_false
from .utils import get_commit_id
from .utils import get_commit_info
from .utils import get_current_time
from .utils import load_timer
from .utils import parse_rounds
@@ -311,6 +312,15 @@ def warn(self, text):
if self.capman:
self.capman.resumecapture()

def error(self, text):
if self.capman:
self.capman.suspendcapture(in_=True)
self.term.sep("-", red=True, bold=True)
self.term.line(text, red=True, bold=True)
self.term.sep("-", red=True, bold=True)
if self.capman:
self.capman.resumecapture()

def info(self, text, **kwargs):
if self.capman:
self.capman.suspendcapture(in_=True)
@@ -325,6 +335,9 @@ def debug(self, text, **kwargs):


class BenchmarkSession(object):
compare_by_fullname = None
compare_by_name = None

def __init__(self, config):
self.verbose = config.getoption("benchmark_verbose")
self.logger = Logger(
@@ -371,14 +384,12 @@ def __init__(self, config):
files.sort()
self.compare = files[-1]
else:
rex = re.compile("^0?0?0?%s" % re.escape(self.compare))
files = [f for f in files if rex.match(str(f.basename))]
files = [f for f in files if str(f.basename).startswith(self.compare)]
if not files:
raise pytest.UsageError("No benchmark files matched %r" % self.compare)
elif len(files) > 1:
raise pytest.UsageError("Too many benchmark files matched %r: %s" % (self.compare, files))
self.compare, = files
self.logger.info("Comparing benchmark results to %s" % self.compare)
self.histogram = first_or_false(config.getoption("benchmark_histogram"))
self.json = config.getoption("benchmark_json")
self.group_by = config.getoption("benchmark_group_by")
@@ -437,11 +448,19 @@ def handle_loading(self):
self.config.hook.pytest_benchmark_compare_machine_info(config=self.config, benchmarksession=self,
machine_info=machine_info,
compared_benchmark=compared_benchmark)
self.compare_by_name = {bench['name']: bench for bench in compared_benchmark['benchmarks']}
self.compare_by_fullname = {bench['fullname']: bench for bench in compared_benchmark['benchmarks']}

self.logger.info("Comparing against benchmark %s:" % self.compare.basename, bold=True)
self.logger.info("| commit info: %s" % ", ".join("%s=%s" % i for i in compared_benchmark['commit_info'].items()))
self.logger.info("| saved at: %s" % compared_benchmark['datetime'])
self.logger.info("| saved using pytest-benchmark %s:" % compared_benchmark['version'])

def display(self, tr):
if not self.benchmarks:
return

tr.ensure_newline()
self.handle_saving()
self.handle_loading()

@@ -512,13 +531,37 @@ def display(self, tr):
for prop in "outliers", "rounds", "iterations":
tr.write("{0:>{1}}".format(bench[prop], widths[prop]))
tr.write("\n")
if self.compare:
if bench.fullname in self.compare_by_fullname:
self.display_compare_row(tr, widths, adjustment, bench,
self.compare_by_fullname[bench.fullname])
elif bench.name in self.compare_by_name:
self.display_compare_row(tr, widths, adjustment, bench, self.compare_by_name[bench.name])

tr.write_line("-" * sum(widths.values()), yellow=True)
tr.write_line("")
tr.write_line("(*) Outliers: 1 Standard Deviation from Mean; "
"1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.", bold=True, black=True)
tr.write_line("")

def display_compare_row(self, tr, widths, adjustment, bench, comp):
stats = comp['stats']
tr.write("".ljust(widths["name"]))
for prop in "min", "max", "mean", "stddev", "iqr":
val = bench[prop] - stats[prop]
strval = "{0:,.4f}".format(abs(val * adjustment))
if val > 0:
tr.write("{0:>{1}}".format("+", widths[prop] - len(strval)), bold=True, red=True)
tr.write(strval, red=True)
elif val < 0:
tr.write("{0:>{1}}".format("-", widths[prop] - len(strval)), bold=True, green=True)
tr.write(strval, green=True)
else:
tr.write("{0:>{1}}".format("NC", widths[prop]), bold=True, black=True)

for prop in "outliers", "rounds", "iterations":
tr.write("{0:>{1}}".format(stats[prop], widths[prop]))
tr.write("\n")


def pytest_benchmark_compare_machine_info(config, benchmarksession, machine_info, compared_benchmark):
if compared_benchmark['machine_info'] != machine_info:
@@ -554,7 +597,11 @@ def pytest_benchmark_group_stats(benchmarks, group_by):


def pytest_terminal_summary(terminalreporter):
terminalreporter.config._benchmarksession.display(terminalreporter)
try:
terminalreporter.config._benchmarksession.display(terminalreporter)
except Exception:
terminalreporter.config._benchmarksession.logger.error("\n%s" % traceback.format_exc())
raise


def pytest_benchmark_generate_machine_info():
@@ -571,9 +618,7 @@ def pytest_benchmark_generate_machine_info():


def pytest_benchmark_generate_commit_info():
return {
"id": get_commit_id(),
}
return get_commit_info()


def pytest_benchmark_generate_json(config, benchmarks):
@@ -596,9 +641,9 @@ def pytest_benchmark_generate_json(config, benchmarks):
'group': bench.group,
'name': bench.name,
'fullname': bench.fullname,
'stats': bench.json(),
'stats': dict(bench.json(), iterations=bench.iterations),
'options': dict(
iterations=bench.iterations,

**{k: v.__name__ if callable(v) else v for k, v in bench.options.items()}
)
})
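
For reference, a saved benchmark entry now has roughly the following shape (every field value here is hypothetical). Moving iterations from options into stats is what lets the comparison row print it alongside the other stats:

# Hypothetical example of one entry in the saved JSON's "benchmarks" list;
# all values below are made up for illustration.
entry = {
    "group": None,
    "name": "test_fast",
    "fullname": "tests/test_example.py::test_fast",
    "stats": {
        "min": 0.0012, "max": 0.0031, "mean": 0.0015,
        "stddev": 0.0002, "iqr": 0.0003,
        "outliers": "2;5", "rounds": 100,
        "iterations": 10,  # now stored with the stats instead of the options
    },
    "options": {
        "timer": "perf_counter",  # callables are saved by __name__
        "disable_gc": False,
    },
}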
16 changes: 12 additions & 4 deletions src/pytest_benchmark/utils.py
@@ -37,21 +37,29 @@ def __repr__(self):


def get_commit_id():
suffix = ''
info = get_commit_info()
return '%s_%s%s' % (info['id'], get_current_time(), '_uncommitted-changes' if info['dirty'] else '')


def get_commit_info():
dirty = False
commit = 'unversioned'
if os.path.exists('.git'):
desc = subprocess.check_output('git describe --dirty --always --long --abbrev=40'.split()).strip()
desc = desc.split('-')
if desc[-1].strip() == 'dirty':
suffix = '_uncommitted-changes'
dirty = True
desc.pop()
commit = desc[-1].strip('g')
elif os.path.exists('.hg'):
desc = subprocess.check_output('hg id --id --debug'.split()).strip()
if desc[-1] == '+':
suffix = '_uncommitted-changes'
dirty = True
commit = desc.strip('+')
return '%s_%s%s' % (commit, get_current_time(), suffix)
return {
'id': commit,
'dirty': dirty
}


def get_current_time():
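
Putting the utils changes together: get_commit_info() now returns a dict with the commit id and a dirty flag, and get_commit_id() builds the save-file prefix from it. A minimal sketch of that composition follows; the timestamp format is a stand-in for get_current_time() and the hash is hypothetical.

def make_commit_id(info, now):
    # Mirrors how get_commit_id() now composes its result from get_commit_info():
    # "<commit id>_<current time>" plus a suffix when the working tree is dirty.
    return "%s_%s%s" % (info["id"], now, "_uncommitted-changes" if info["dirty"] else "")

# Hypothetical values:
print(make_commit_id({"id": "a6285ff", "dirty": True}, now="20150809_120000"))
# -> a6285ff_20150809_120000_uncommitted-changes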
