
Commit

Merge pull request #34 from jvanasco/feature-profiling_code
Feature profiling code
mahmoud committed Feb 26, 2017
2 parents 8c92a7b + 7f3dc62 commit f42c700
Showing 4 changed files with 143 additions and 8 deletions.
21 changes: 19 additions & 2 deletions run_tests.py
@@ -10,8 +10,8 @@
from tests.OrderedDict import OrderedDict
from tests.argparse import ArgumentParser

import tests
from ashes import AshesEnv, Template
import tests
from tests import ALL_TEST_MODULES, OPS, AshesTest
import unittest

@@ -155,6 +155,8 @@ def parse_args():
help='run unittests')
prs.add_argument('--disable_core', action='store_true',
help='disable core tests')
prs.add_argument('--benchtest', action='store_true',
help='run testing benchmark; disables everything else')
return prs.parse_args()


@@ -165,6 +167,14 @@ def main(width=DEFAULT_WIDTH):
run_benchmarks = args.benchmark or False
run_unittests = args.run_unittests or False
disable_core = args.disable_core or False

# if we're running the benchtest for profiling, that's it!
run_benchtest = args.benchtest or False
if run_benchtest:
disable_core = True
run_benchmarks = False
run_unittests = False

if not disable_core:
if not name:
# remember `tests` is a namespace. don't overwrite!
@@ -198,9 +208,16 @@ def main(width=DEFAULT_WIDTH):
results = runner.run(big_suite)
# toggled!
if run_benchmarks:
tests.benchmarks.bench_render_a()
import tests
tests.benchmarks.bench_render_repeat()
tests.benchmarks.bench_render_reinit()
tests.benchmarks.bench_cacheable_templates()

if run_benchtest:
import tests.utils_profiling
import time
filename_stats = 'stats-%s.csv' % time.time()
tests.utils_profiling.profile_function(tests.benchmarks.bench_render_repeat, filename_stats)

if __name__ == '__main__':
main()
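
For reference, a minimal sketch of what the new --benchtest path boils down to when invoked programmatically rather than through run_tests.py. This assumes the repository root is on sys.path; the timestamped CSV name mirrors the diff above.

    # Hypothetical standalone equivalent of `python run_tests.py --benchtest`:
    # profile the repeat benchmark and write per-function stats to a CSV.
    import time
    import tests
    import tests.utils_profiling

    filename_stats = 'stats-%s.csv' % time.time()
    tests.utils_profiling.profile_function(
        tests.benchmarks.bench_render_repeat, filename_stats)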
2 changes: 1 addition & 1 deletion tests/__init__.py
@@ -1,4 +1,4 @@
from .core import AshesTest, OPS
from . import dust_site, comp_helpers, new_features, regressions, template_loaders, benchmarks
from . import dust_site, comp_helpers, new_features, regressions, template_loaders, benchmarks, utils_profiling

ALL_TEST_MODULES = [dust_site, comp_helpers, new_features, regressions, template_loaders]
49 changes: 44 additions & 5 deletions tests/benchmarks.py
@@ -11,17 +11,56 @@

from . import utils

__all__ = ['bench_render_a', 'bench_cacheable_templates', ]
__all__ = ['bench_render_reinit',
'bench_render_repeat',
'bench_cacheable_templates',
]

# ==============================================================================


def bench_render_a():
def bench_render_repeat():
"""
this bench is designed as a baseline for performance comparisons when
adjusting the code
This bench is designed as a baseline for performance comparisons when
adjusting the code.
A single template loader is re-used for all iterations.
"""
print("running benchmarks: bench_render_a...")
print("running benchmarks: bench_render_repeat...")
if utils.ChertDefaults is None:
utils.ChertDefaults = utils._ChertDefaults()

_ashesLoader = ashes.TemplatePathLoader(utils._chert_dir)
_ashesEnv = ashes.AshesEnv(loaders=(_ashesLoader, ))

def test_baseline_chert():
"""
test_baseline_chert
this just runs through all the chert templates using a default `TemplatePathLoader`
"""
renders = {}
for (fname, fdata) in utils.ChertDefaults.chert_data.items():
rendered = _ashesEnv.render(fname, fdata)
renders[fname] = rendered

timed = {}
ranged = range(0, 1000)
timed["baseline_chert"] = []
for i in ranged:
t_start = time.time()
test_baseline_chert()
t_fin = time.time()
timed["baseline_chert"] .append(t_fin - t_start)
utils.print_timed(timed)


def bench_render_reinit():
"""
This bench is designed as a baseline for performance comparisons when
adjusting the code.
This will create (reinitialize) a new Ashes loader and environment for each
test iteration.
"""
print("running benchmarks: bench_render_reinit...")
if utils.ChertDefaults is None:
utils.ChertDefaults = utils._ChertDefaults()

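The timing loop above follows a simple pattern: run the workload a fixed number of times, record each wall-clock duration, and hand the samples to a reporting helper (utils.print_timed in the diff). A generic sketch of that pattern, with a stand-in workload and a plain print instead of the project helper:

    import time

    def time_repeated(func, iterations=1000):
        # Call func() `iterations` times and collect each wall-clock duration,
        # mirroring the loop in bench_render_repeat above.
        durations = []
        for _ in range(iterations):
            t_start = time.time()
            func()
            durations.append(time.time() - t_start)
        return durations

    if __name__ == '__main__':
        # Stand-in workload; the benchmarks render templates instead.
        timed = {'example_workload': time_repeated(lambda: sum(range(1000)))}
        for name, samples in timed.items():
            print('%s: %.4fs total over %d runs' % (name, sum(samples), len(samples)))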
79 changes: 79 additions & 0 deletions tests/utils_profiling.py
@@ -0,0 +1,79 @@
import cProfile as profile
import pstats
import pprint
import sys
import os
import csv
import time

basedir = os.path.dirname(__file__)
basedir_len = len(basedir)

def format_fname(value, _sys_path=None):
"""from pyramid_debugtoolbar"""
if _sys_path is None:
_sys_path = sys.path # dependency injection
# If the value is not an absolute path, then it is a builtin or
# a relative file (thus a project file).
if not os.path.isabs(value):
if value.startswith(('{', '<')):
return value
if value.startswith('.' + os.path.sep):
return value
return '.' + os.path.sep + value


def profile_function(to_profile, filename_stats=None):
"""largely from pyramid_debugtoolbar"""
profiler = profile.Profile()
result = profiler.runcall(to_profile)
stats = pstats.Stats(profiler)
function_calls = []
flist = stats.sort_stats('cumulative').fcn_list

for func in flist:
current = {}
info = stats.stats[func]

# Number of calls
if info[0] != info[1]:
current['ncalls'] = '%d/%d' % (info[1], info[0])
else:
current['ncalls'] = info[1]

# Total time
current['tottime'] = info[2] * 1000

# Quotient of total time divided by number of calls
if info[1]:
current['percall'] = info[2] * 1000 / info[1]
else:
current['percall'] = 0

# Cumulative time
current['cumtime'] = info[3] * 1000

# Quotient of the cumulative time divided by the number
# of primitive calls.
if info[0]:
current['percall_cum'] = info[3] * 1000 / info[0]
else:
current['percall_cum'] = 0

# Filename
filename = pstats.func_std_string(func)
current['filename_long'] = filename
current['filename'] = format_fname(filename)
function_calls.append(current)

keys = function_calls[0].keys()
if filename_stats:
with open(filename_stats, 'w') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(function_calls)
print("wrote to %s" % filename_stats)
else:
print("returning (function_calls)")
return function_calls
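
A usage sketch for profile_function, based only on the code above (the workload function here is a stand-in). With a filename it writes the per-function stats as CSV; without one it returns them as a list of dicts keyed by ncalls, tottime, percall, cumtime, percall_cum, filename_long, and filename.

    from tests.utils_profiling import profile_function

    def workload():
        # Stand-in function to profile; any zero-argument callable works.
        return sum(i * i for i in range(10000))

    # Write per-function stats to a CSV file.
    profile_function(workload, 'workload-stats.csv')

    # Or collect the rows in memory; they are sorted by cumulative time,
    # so the first entry is the most expensive call.
    rows = profile_function(workload)
    print(rows[0]['filename'], rows[0]['cumtime'])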
