Add more tests against the storage part.
ionelmc committed Aug 11, 2015
1 parent 0c99443 commit 4eb3b63
Showing 3 changed files with 233 additions and 41 deletions.
21 changes: 9 additions & 12 deletions src/pytest_benchmark/plugin.py
@@ -486,10 +486,9 @@ def handle_saving(self):
                 output_json=output_json
             )
             payload = json.dumps(output_json, indent=4)
-            if self.json:
-                with self.json as fh:
-                    fh.write(payload)
-                self.logger.info("Wrote benchmark data in %s" % self.json, purple=True)
+            with self.json as fh:
+                fh.write(payload)
+            self.logger.info("Wrote benchmark data in %s" % self.json, purple=True)
 
         if self.save or self.autosave:
             output_json = self.config.hook.pytest_benchmark_generate_json(
@@ -503,19 +502,15 @@ def handle_saving(self):
                 output_json=output_json
             )
             payload = json.dumps(output_json, indent=4)
-            if self.json:
-                with self.json as fh:
-                    fh.write(payload)
-                self.logger.info("Wrote benchmark data in %s" % self.json, purple=True)
             output_file = None
             if self.save:
                 output_file = self.storage.join("%s_%s.json" % (self.next_num, self.save))
                 assert not output_file.exists()
-                output_file.write_binary(payload)
+                output_file.write(payload)
            elif self.autosave:
                 output_file = self.storage.join("%s_%s.json" % (self.next_num, get_commit_id()))
                 assert not output_file.exists()
-                output_file.write_binary(payload)
+                output_file.write(payload)
             if output_file:
                 self.logger.info("Saved benchmark data in %s" % output_file)
 
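Worth noting on the write_binary() → write() change above: json.dumps() returns text (str), while py.path.local.write_binary() expects bytes, so the old calls break once the payload is text. A minimal sketch of the difference, using a hypothetical payload and file name (not part of the commit):

    import json
    import py

    payload = json.dumps({"mean": 20.05}, indent=4)   # str, not bytes
    target = py.path.local("0001_foobar.json")        # hypothetical save target
    target.write(payload)                             # text-mode write accepts str
    assert json.loads(target.read()) == {"mean": 20.05}
    # target.write_binary(payload) would fail on Python 3: it requires bytes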
@@ -563,7 +558,7 @@ def display(self, tr):
 
     def check_regressions(self):
         if self.performance_regressions:
-            self.logger.error("Performance has regressed: \n%s" % "\n".join(
+            self.logger.error("Performance has regressed:\n%s" % "\n".join(
                 "\t%s - %s" % line for line in self.performance_regressions
             ))
             raise PerformanceRegression("Performance has regressed.")
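Each entry in performance_regressions is a (fullname, message) pair, so the "\t%s - %s" template above renders one tab-indented line per failed check. A small sketch, with values drawn from the tests added in this commit:

    regressions = [
        ("tests/test_func/test_perf.py::test_engine",
         "Field stddev has failed PercentageRegressionCheck: 26.572963937 > 5.000000000"),
    ]
    print("Performance has regressed:\n%s" % "\n".join(
        "\t%s - %s" % line for line in regressions
    ))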
@@ -600,6 +595,8 @@ def _box_points(self, serie, _):
        output_file = py.path.local("%s-%s.svg" % (self.histogram, name))
 
        table = list(self.generate_histogram_table(bench, history, sorted(history)))
+       from pprint import pprint
+       pprint(table)
 
        unit, adjustment = time_unit(min(
            row[self.sort]
@@ -850,7 +847,7 @@ def pytest_benchmark_generate_machine_info():
     }
 
 
-def pytest_benchmark_generate_commit_info():
+def pytest_benchmark_generate_commit_info(config):
     return get_commit_info()
 
 
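Because the hook now receives config, a project can override commit-info generation from its own conftest.py. A hedged sketch (the return values are made up, not part of this commit):

    # conftest.py
    def pytest_benchmark_generate_commit_info(config):
        # `config` is the pytest config object; values below are hypothetical
        return {"id": "deadbeef", "dirty": False}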
252 changes: 223 additions & 29 deletions tests/test_storage.py
@@ -1,15 +1,104 @@
-import logging
+import json
+import logging
 
+import py
+import pytest
+
+from freezegun import freeze_time
+
 try:
     from io import StringIO
 except ImportError:
-    from StringIO import StringIO
+    from cStringIO import StringIO
 
-import py
-import pytest
+from pytest_benchmark.plugin import BenchmarkSession, PerformanceRegression
+from pytest_benchmark.plugin import pytest_benchmark_compare_machine_info
+from pytest_benchmark.plugin import pytest_benchmark_generate_json
+from pytest_benchmark.plugin import pytest_benchmark_group_stats
+from pytest_benchmark.utils import PercentageRegressionCheck, DifferenceRegressionCheck
 
-from pytest_benchmark.plugin import BenchmarkSession, pytest_benchmark_group_stats, pytest_benchmark_compare_machine_info
+SAVE_DATA = {
+    "commit_info": {
+        'foo': 'bar',
+    },
+    "version": "2.5.0",
+    "benchmarks": [
+        {
+            "stats": {
+                'include_data': False,
+                "q1": 19.35233497619629,
+                "q3": 20.36447501182556,
+                "iterations": 1,
+                "min": 19.316043853759766,
+                "max": 21.620103120803833,
+                "median": 19.9351589679718,
+                "iqr": 1.0121400356292725,
+                "stddev_outliers": 2,
+                "stddev": 0.7074680670532808,
+                "outliers": "2;0",
+                "iqr_outliers": 0,
+                "rounds": 10,
+                "mean": 20.049841284751892
+            },
+            "fullname": "tests/test_func/test_perf.py::test_engine",
+            "group": None,
+            "name": "test_engine",
+            "options": {
+                "disable_gc": False,
+                "warmup": False,
+                "timer": "time",
+                "min_rounds": 10,
+                "max_time": 1.0,
+                "min_time": 2.5e-05
+            }
+        }
+    ],
+    "machine_info": {
+        "foo": "bar",
+    },
+    "datetime": "2012-01-14T12:00:01"
+}
+JSON_DATA = {
+    "commit_info": {
+        'foo': 'bar',
+    },
+    "version": "2.5.0",
+    "benchmarks": [
+        {
+            "stats": {
+                'include_data': True,
+                "q1": 19.35233497619629,
+                "q3": 20.36447501182556,
+                "iterations": 1,
+                "min": 19.316043853759766,
+                "max": 21.620103120803833,
+                "median": 19.9351589679718,
+                "iqr": 1.0121400356292725,
+                "stddev_outliers": 2,
+                "stddev": 0.7074680670532808,
+                "outliers": "2;0",
+                "iqr_outliers": 0,
+                "rounds": 10,
+                "mean": 20.049841284751892
+            },
+            "fullname": "tests/test_func/test_perf.py::test_engine",
+            "group": None,
+            "name": "test_engine",
+            "options": {
+                "disable_gc": False,
+                "warmup": False,
+                "timer": "time",
+                "min_rounds": 10,
+                "max_time": 1.0,
+                "min_time": 2.5e-05
+            }
+        }
+    ],
+    "machine_info": {
+        "foo": "bar",
+    },
+    "datetime": "2012-01-14T12:00:01"
+}
 
 
 class Namespace(object):
@@ -19,6 +108,14 @@ def __init__(self, **kwargs):
     def __getitem__(self, item):
         return self.__dict__[item]
 
+
+class TestFriendlyStringIO(StringIO):
+    def close(self):
+        value = self.getvalue()
+        super(TestFriendlyStringIO, self).close()
+        self.getvalue = lambda: value
+
+
 class MockSession(BenchmarkSession):
     def __init__(self):
         self.histogram = True
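The helper exists because handle_saving() closes the stream it writes to (`with self.json as fh`), and a plain StringIO raises ValueError on getvalue() after close(); the subclass snapshots the buffer first. For example:

    buf = TestFriendlyStringIO()
    buf.write(u"hello")
    buf.close()
    assert buf.getvalue() == u"hello"  # a plain StringIO would raise ValueError here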
@@ -39,17 +136,22 @@ def __init__(self):
             pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
             pytest_benchmark_update_machine_info=lambda **kwargs: None,
             pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
+            pytest_benchmark_generate_json=pytest_benchmark_generate_json,
+            pytest_benchmark_update_json=lambda **kwargs: None,
             pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
             pytest_benchmark_update_commit_info=lambda **kwargs: None,
         ))
+        self.group_by = 'group'
         for bench_file in self.storage.listdir("[0-9][0-9][0-9][0-9]_*.json"):
             with bench_file.open('rU') as fh:
                 data = json.load(fh)
             self.benchmarks.extend(
                 Namespace(
-                    json=lambda: bench['stats'],
+                    json=lambda include_data=False: dict(bench['stats'], include_data=include_data),
                     name=bench['name'],
+                    fullname=bench['fullname'],
                     group=bench['group'],
+                    options=bench['options'],
                     **bench['stats']
                 )
                 for bench in data['benchmarks']
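The reworked json stub above mirrors the two fixtures at the top of the file: SAVE_DATA and JSON_DATA differ only in stats['include_data']. Roughly, with an abridged stand-in for the stats dict:

    bench = {"stats": {"min": 19.3}}  # abridged stand-in
    stub = lambda include_data=False: dict(bench["stats"], include_data=include_data)
    assert stub() == {"min": 19.3, "include_data": False}                  # saved files (SAVE_DATA)
    assert stub(include_data=True) == {"min": 19.3, "include_data": True}  # --benchmark-json output (JSON_DATA)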
@@ -62,37 +164,129 @@ def sess(request):
     return MockSession()
 
 
-def test_rendering(sess):
-    sess.handle_histogram()
-
-
-def test_compare(sess):
-    # self.handle_saving()
-    # self.handle_loading()
-    # self.display_results_table(tr)
-    # self.check_regressions()
-    # self.handle_histogram()
-    output = StringIO()
-    sess.logger = Namespace(
-        warn=lambda text: output.write(text + '\n'),
-        info=lambda text, **opts: output.write(text + '\n'),
-        error=lambda text: output.write(text + '\n'),
-    )
-    sess.handle_loading()
+def make_logger(sess):
+    output = StringIO()
+    sess.logger = Namespace(
+        warn=lambda text: output.write(text + '\n'),
+        info=lambda text, **opts: output.write(text + '\n'),
+        error=lambda text: output.write(text + '\n'),
+    )
+    return output
+
+
+def test_rendering(sess):
+    sess.handle_histogram()
+
+
+def test_regression_checks(sess):
+    output = make_logger(sess)
+    sess.handle_loading()
+    sess.compare == '0001'
+    sess.performance_regressions = []
+    sess.compare_fail = [
+        PercentageRegressionCheck("stddev", 5),
+        DifferenceRegressionCheck("max", 0.5)
+    ]
     sess.display_results_table(Namespace(
         write_line=lambda line, **opts: output.write(line + '\n'),
         write=lambda text, **opts: output.write(text),
     ))
-    assert output.getvalue() == """Benchmark machine_info is different. Current: {foo: 'bar'} VS saved: {machine: 'x86_64', node: 'jenkins', processor: 'x86_64', python_compiler: 'GCC 4.6.3', python_implementation: 'CPython', python_version: '2.7.3', release: '3.13.0-53-generic', system: 'Linux'}.
-Comparing against benchmark 0001_b692275e28a23b5d4aae70f453079ba593e60290_20150811_052350.json:
-| commit info: {dirty: False, id: 'b692275e28a23b5d4aae70f453079ba593e60290'}
-| saved at: 2015-08-11T02:23:50.661428
-| saved using pytest-benchmark 2.5.0:
--------------------------------------- benchmark: 1 tests, min 123 rounds (of min 234), 345 max time, timer: None --------------------------------------
-Name (time in s) Min Max Mean StdDev Median IQR Outliers(*) Rounds Iterations
---------------------------------------------------------------------------------------------------------------------------------------------------------
-test_engine 19.3160 21.6201 20.0498 0.7075 19.9352 1.0121 2;0 10 1
- -0.0062 (0%) +0.6172 (2%) +0.0157 (0%) +0.1485 (26%) +0.0806 (0%) +0.3144 (45%) 4;0 10 1
---------------------------------------------------------------------------------------------------------------------------------------------------------
-(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.
-"""
+    assert sess.performance_regressions == [
+        ('tests/test_func/test_perf.py::test_engine',
+         'Field stddev has failed PercentageRegressionCheck: 26.572963937 > '
+         '5.000000000'),
+        ('tests/test_func/test_perf.py::test_engine',
+         'Field max has failed DifferenceRegressionCheck: 0.617182970 > 0.500000000')
+    ]
+    output = make_logger(sess)
+    pytest.raises(PerformanceRegression, sess.check_regressions)
+    assert output.getvalue() == """Performance has regressed:
+\ttests/test_func/test_perf.py::test_engine - Field stddev has failed PercentageRegressionCheck: 26.572963937 \
+> 5.000000000
+\ttests/test_func/test_perf.py::test_engine - Field max has failed DifferenceRegressionCheck: 0.617182970 > \
+0.500000000
+"""
+
+
+def test_compare(sess):
+    output = make_logger(sess)
+    sess.handle_loading()
+    sess.display_results_table(Namespace(
+        write_line=lambda line, **opts: output.write(line + '\n'),
+        write=lambda text, **opts: output.write(text),
+    ))
+    assert output.getvalue() == (
+        "Benchmark machine_info is different. Current: {foo: 'bar'} VS saved: {"
+        "machine: 'x86_64', node: 'jenkins', processor: 'x86_64', python_compiler: 'GCC 4.6.3', "
+        "python_implementation: 'CPython', python_version: '2.7.3', release: '3.13.0-53-generic', system: 'Linux'}.\n"
+        "Comparing against benchmark 0001_b692275e28a23b5d4aae70f453079ba593e60290_20150811_052350.json:\n"
+        "| commit info: {dirty: False, id: 'b692275e28a23b5d4aae70f453079ba593e60290'}\n"
+        "| saved at: 2015-08-11T02:23:50.661428\n"
+        "| saved using pytest-benchmark 2.5.0:\n"
+        "-------------------------------------- benchmark: 1 tests, min 123 rounds (of min 234), "
+        "345 max time, timer: None --------------------------------------\n"
+        "Name (time in s) Min Max Mean StdDev Median "
+        "IQR Outliers(*) Rounds Iterations\n"
+        "--------------------------------------------------------------------------------------------------------------"
+        "------------------------------------------\n"
+        "test_engine 19.3160 21.6201 20.0498 0.7075 19.9352 "
+        "1.0121 2;0 10 1\n"
+        " -0.0062 (0%) +0.6172 (2%) +0.0157 (0%) +0.1485 (26%) +0.0806 (0%) "
+        "+0.3144 (45%) 4;0 10 1\n"
+        "--------------------------------------------------------------------------------------------------------------"
+        "------------------------------------------\n"
+        "(*) Outliers: 1 Standard Deviation from Mean; "
+        "1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.\n"
+        "\n"
+    )
+
+
+@freeze_time("2012-01-14 12:00:01")
+def test_save_json(sess, tmpdir):
+    sess.save = False
+    sess.autosave = False
+    sess.json = TestFriendlyStringIO()
+    sess.save_data = False
+    sess.handle_saving()
+    assert tmpdir.listdir() == []
+    assert json.loads(sess.json.getvalue()) == JSON_DATA
+
+
+@freeze_time("2012-01-14 12:00:01")
+def test_save_with_name(sess, tmpdir):
+    sess.save = 'foobar'
+    sess.autosave = True
+    sess.json = None
+    sess.save_data = False
+    sess.storage = tmpdir
+    sess.handle_saving()
+    files = tmpdir.listdir()
+    assert len(files) == 1
+    assert json.loads(files[0].read()) == SAVE_DATA
+
+
+@freeze_time("2012-01-14 12:00:01")
+def test_save_no_name(sess, tmpdir):
+    sess.save = True
+    sess.autosave = True
+    sess.json = None
+    sess.save_data = False
+    sess.storage = tmpdir
+    sess.handle_saving()
+    files = tmpdir.listdir()
+    assert len(files) == 1
+    assert json.loads(files[0].read()) == SAVE_DATA
+
+
+@freeze_time("2012-01-14 12:00:01")
+def test_autosave(sess, tmpdir):
+    sess.save = False
+    sess.autosave = True
+    sess.json = None
+    sess.save_data = False
+    sess.storage = tmpdir
+    sess.handle_saving()
+    files = tmpdir.listdir()
+    assert len(files) == 1
+    assert json.loads(files[0].read()) == SAVE_DATA
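The @freeze_time decorator is what makes the "datetime": "2012-01-14T12:00:01" field in SAVE_DATA and JSON_DATA deterministic. A minimal illustration of freezegun's behavior:

    import datetime
    from freezegun import freeze_time

    with freeze_time("2012-01-14 12:00:01"):
        # inside the block, "now" is pinned to the frozen timestamp
        assert datetime.datetime.now().isoformat() == "2012-01-14T12:00:01"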
1 change: 1 addition & 0 deletions tox.ini
@@ -32,6 +32,7 @@ deps =
     pypy: jitviewer
     aspectlib==1.3.0
     pygal==2.0.1
+    freezegun==0.3.5
 
 commands =
     {posargs:py.test --cov=src --cov-report=term-missing -vv}
