Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
93 changes: 93 additions & 0 deletions pyperformance/tests/__init__.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,18 @@
import contextlib
import errno
import os
import os.path
import shutil
import subprocess
import sys
import tempfile


TESTS_ROOT = os.path.realpath(os.path.dirname(__file__))
DATA_DIR = os.path.join(TESTS_ROOT, 'data')
REPO_ROOT = os.path.dirname(os.path.dirname(TESTS_ROOT))


@contextlib.contextmanager
def temporary_file(**kwargs):
tmp_filename = tempfile.mktemp(**kwargs)
Expand All @@ -15,3 +24,87 @@ def temporary_file(**kwargs):
except OSError as exc:
if exc.errno != errno.ENOENT:
raise


def run_cmd(*argv):
    """Run *argv* as a child process in the repo root.

    Prints the command first, and exits the current process with the
    child's exit code when the command fails, so callers can treat a
    successful return as "command succeeded".
    """
    print(f"(tests) Execute: {' '.join(argv)}", flush=True)
    # subprocess.run() waits for the child and kills it if the wait is
    # interrupted (e.g. KeyboardInterrupt), replacing the manual
    # Popen/wait/kill/re-raise dance with the stdlib idiom.
    proc = subprocess.run(argv, cwd=REPO_ROOT)
    sys.stdout.flush()
    if proc.returncode:
        # Propagate the child's failure as our own exit status.
        sys.exit(proc.returncode)
    print("", flush=True)


#############################
# functional tests

class Functional:
    """A mixin for functional tests.

    Shares one temporary directory and one pyperformance venv across all
    TestCase classes that mix this in, so the (slow) venv creation runs
    at most once per test process.
    """

    # XXX Disallow multi-proc or threaded test runs?
    # Shared state, deliberately stored on Functional itself (not on the
    # subclass) so every mixing-in class sees the same venv.
    _TMPDIR = None  # lazily-created temp dir that holds the venv
    _VENV = None  # path of the shared pyperformance venv
    _COUNT = 0  # how many classes have run setUpClass

    maxDiff = 80 * 100

    @classmethod
    def setUpClass(cls):
        # Only the first class to set up creates the venv; later classes
        # reuse it.
        tmpdir = Functional._TMPDIR
        if tmpdir is None:
            tmpdir = Functional._TMPDIR = tempfile.mkdtemp()
            venv = Functional._VENV = os.path.join(tmpdir, 'venv')
            # Build a venv with all benchmark dependencies installed.
            run_cmd(
                sys.executable, '-u', '-m', 'pyperformance',
                'venv', 'create',
                '-b', 'all',
                '--venv', venv,
            )

            # Remove stale packaging metadata left behind by the install
            # step; ignore it if it does not exist.
            egg_info = "pyperformance.egg-info"
            print(f"(tests) Remove directory {egg_info}", flush=True)
            try:
                shutil.rmtree(egg_info)
            except FileNotFoundError:
                pass
        Functional._COUNT += 1
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        tmpdir = Functional._TMPDIR
        venv = Functional._VENV
        # NOTE(review): _COUNT is incremented in setUpClass but never
        # decremented, so this cleanup only fires when exactly one class
        # used the mixin in this process — confirm that is intended.
        if Functional._COUNT == 1:
            assert venv
            run_cmd(
                sys.executable, '-u', '-m', 'pyperformance',
                'venv', 'remove',
                '--venv', venv,
            )
            if os.path.exists(tmpdir):
                shutil.rmtree(tmpdir)

    @property
    def venv_python(self):
        # Path of the Python interpreter inside the shared venv.
        if os.name == "nt":
            # Windows venvs keep the interpreter under Scripts\.
            python = os.path.basename(sys.executable)
            return os.path.join(self._VENV, 'Scripts', python)
        else:
            return os.path.join(self._VENV, 'bin', 'python3')

    def run_pyperformance(self, cmd, *args, invenv=True):
        # Run one pyperformance subcommand; run_cmd() exits the process
        # on failure.  With invenv=True the shared venv is passed along
        # via --venv.
        if invenv:
            assert self._VENV
            args += ('--venv', self._VENV)
        run_cmd(
            sys.executable, '-u', '-m', 'pyperformance',
            cmd, *args,
        )
17 changes: 17 additions & 0 deletions pyperformance/tests/__main__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import unittest

from pyperformance import tests


def load_tests(loader, standard_tests, pattern):
    """Standard unittest ``load_tests`` hook.

    Discovers every test module under the tests package and appends the
    discovered tests to *standard_tests*.
    """
    if not pattern:
        pattern = 'test*'
    discovered = loader.discover(
        start_dir=tests.TESTS_ROOT,
        top_level_dir=tests.TESTS_ROOT,
        pattern=pattern,
    )
    standard_tests.addTests(discovered)
    return standard_tests


# Allow running the suite directly: python -m pyperformance.tests
if __name__ == "__main__":
    unittest.main()
30 changes: 3 additions & 27 deletions pyperformance/tests/test_compare.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,31 +9,7 @@
from pyperformance import tests


DATA_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), 'data'))


def run_cmd(cmd):
print("Execute: %s" % ' '.join(cmd))
proc = subprocess.Popen(cmd)
try:
proc.wait()
except: # noqa
proc.kill()
proc.wait()
raise

exitcode = proc.returncode
if exitcode:
sys.exit(exitcode)


class CompareTests(unittest.TestCase):
maxDiff = 80 * 100

@classmethod
def setUpClass(cls):
cmd = [sys.executable, '-m', 'pyperformance', 'venv', 'create']
run_cmd(cmd)
class FunctionalTests(tests.Functional, unittest.TestCase):

def compare(self, *args, **kw):
dataset = kw.get('dataset', 'py')
Expand All @@ -48,8 +24,8 @@ def compare(self, *args, **kw):
marker = file1

cmd = [sys.executable, '-m', 'pyperformance', 'compare',
os.path.join(DATA_DIR, file1),
os.path.join(DATA_DIR, file2)]
os.path.join(tests.DATA_DIR, file1),
os.path.join(tests.DATA_DIR, file2)]
cmd.extend(args)
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
Expand Down
16 changes: 16 additions & 0 deletions pyperformance/tests/test_list.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import unittest

from pyperformance import tests


class FunctionalTests(tests.Functional, unittest.TestCase):
    """Functional tests for the benchmark listing commands."""

    def test_list(self):
        # 'list' must succeed; run_pyperformance exits the process on failure.
        self.run_pyperformance('list')

    def test_list_groups(self):
        # 'list_groups' must succeed as well.
        self.run_pyperformance('list_groups')


# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()
30 changes: 30 additions & 0 deletions pyperformance/tests/test_run.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
import os.path
import unittest

from pyperformance import tests


class FunctionalTests(tests.Functional, unittest.TestCase):
    """Functional test: run the full benchmark suite, then inspect it."""

    def test_run_and_show(self):
        results_file = os.path.join(self._TMPDIR, 'bench.json')

        # -b all: check that *all* benchmark work
        #
        # --debug-single-value: benchmark results don't matter, we only
        # check that running benchmarks don't fail.
        self.run_pyperformance(
            'run',
            '-b', 'all',
            '--debug-single-value',
            '-o', results_file,
        )

        # Display slowest benchmarks
        tests.run_cmd(
            self.venv_python, '-u', '-m', 'pyperf', 'slowest', results_file,
        )


# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()
19 changes: 19 additions & 0 deletions pyperformance/tests/test_show.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
import os.path
import unittest

from pyperformance import tests


class FunctionalTests(tests.Functional, unittest.TestCase):
    """Functional tests for the 'show' command."""

    def test_show(self):
        # 'show' reads stored result files, so no venv is required.
        for name in ('py36.json', 'mem1.json'):
            filename = os.path.join(tests.DATA_DIR, name)
            with self.subTest(filename):
                self.run_pyperformance('show', filename, invenv=False)


# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()
87 changes: 4 additions & 83 deletions runtests.py
Original file line number Diff line number Diff line change
@@ -1,93 +1,14 @@
#!/usr/bin/env python3
import os.path
import shutil
import subprocess
import sys
import tempfile


def run_cmd(cmd):
print("(runtests.py) Execute: %s" % ' '.join(cmd), flush=True)
proc = subprocess.Popen(cmd)
try:
proc.wait()
except: # noqa
proc.kill()
proc.wait()
raise
sys.stdout.flush()
exitcode = proc.returncode
if exitcode:
sys.exit(exitcode)
print("", flush=True)


def run_tests(venv):
# Move to the root directly
root = os.path.dirname(__file__)
if root:
os.chdir(root)

python = sys.executable
script = 'pyperformance'
if os.name == "nt":
python_executable = os.path.basename(python)
venv_python = os.path.join(venv, 'Scripts', python_executable)
else:
venv_python = os.path.join(venv, 'bin', 'python')

def run_bench(*cmd):
cmd = cmd + ('--venv', venv)
run_cmd(cmd)

run_bench(python, '-u', script, 'venv', 'create', '-b', 'all')

egg_info = "pyperformance.egg-info"
print("(runtests.py) Remove directory %s" % egg_info, flush=True)
try:
shutil.rmtree(egg_info)
except FileNotFoundError:
pass

run_bench(python, '-u', script, 'venv', 'create')

for filename in (
os.path.join('pyperformance', 'tests', 'data', 'py36.json'),
os.path.join('pyperformance', 'tests', 'data', 'mem1.json'),
):
run_cmd((python, script, 'show', filename))

run_bench(python, '-u', script, 'list')
run_bench(python, '-u', script, 'list_groups')

json = os.path.join(venv, 'bench.json')

# -b all: check that *all* benchmark work
#
# --debug-single-value: benchmark results don't matter, we only
# check that running benchmarks don't fail.
run_bench(python, '-u', script, 'run', '-b', 'all', '--debug-single-value',
'-o', json)

# Display slowest benchmarks
run_cmd((venv_python, '-u', '-m', 'pyperf', 'slowest', json))

run_bench(python, '-u', script, 'venv', 'remove')


def main():
    """Run the pyperformance test suite and exit with its status.

    Delegates to ``python -m pyperformance.tests`` so that test
    discovery lives in one place (pyperformance/tests/__main__.py).
    """
    proc = subprocess.run(
        [sys.executable, '-u', '-m', 'pyperformance.tests'],
        cwd=os.path.dirname(__file__) or None,
    )
    # Propagate the suite's exit status; without this the script would
    # report success even when tests fail.
    sys.exit(proc.returncode)


if __name__ == "__main__":
Expand Down