
Hey, look, working code!

Initial commit ff2bd524cb255318adddaa6e2676ce4717f81728 (0 parents), committed by @jacobian on Jun 19, 2010
Showing with 2,246 additions and 0 deletions.
  1. +3 −0 .gitignore
  2. +28 −0 LICENSE
  3. +71 −0 README.rst
  4. 0 benchmarks/startup/__init__.py
  5. +10 −0 benchmarks/startup/benchmark.py
  6. +1 −0 benchmarks/startup/models.py
  7. +7 −0 benchmarks/startup/settings.py
  8. +82 −0 djangobench.py
  9. +2,042 −0 perf.py
  10. +2 −0 requirements.txt
3 .gitignore
@@ -0,0 +1,3 @@
+*.pyc
+django-control/
+django-experiment/
28 LICENSE
@@ -0,0 +1,28 @@
+[This is the new BSD license.]
+
+Copyright (c) 2010 Jacob Kaplan-Moss. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of this project nor the names of its contributors may
+ be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
71 README.rst
@@ -0,0 +1,71 @@
+Djangobench
+===========
+
+This is a harness for a (to-be-written) set of benchmarks for measuring
+Django's performance over time.
+
+Running the benchmarks
+----------------------
+
+This doesn't test a single Django version in isolation -- that wouldn't be
+very useful. Instead, it benchmarks an "experiment" Django against a
+"control", reporting on the difference between the two and measuring for
+statistical significance.
+
+So to run this, you'll need two complete Django source trees. By default
+``djangobench.py`` looks for directories named ``django-control`` and
+``django-experiment`` in this directory, but you can change that
+by using the ``--control`` or ``--experiment`` options.
+
+So, for example, to benchmark Django 1.2 against trunk::
+
+ svn co http://code.djangoproject.com/svn/django/tags/releases/1.2/ django-control
+ svn co http://code.djangoproject.com/svn/django/trunk django-experiment
+ ./djangobench.py
+
+At the time of this writing Django's trunk hasn't significantly diverged
+from Django 1.2, so you should expect results that are not statistically
+significant::
+
+ Running 'startup' benchmark ...
+ Min: 0.138701 -> 0.138900: 1.0014x slower
+ Avg: 0.139009 -> 0.139378: 1.0027x slower
+ Not significant
+ Stddev: 0.00044 -> 0.00046: 1.0382x larger
+
+Writing new benchmarks
+----------------------
+
+Benchmarks are very simple: each one is a Django app with a settings file
+and an executable ``benchmark.py`` that gets run by the harness. This
+script should print timing data to stdout.
+
+See the ``startup`` benchmark directory for an example.
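+
+In outline, a benchmark's layout looks like this (``mybenchmark`` is just a
+placeholder name)::
+
+    benchmarks/
+        mybenchmark/
+            __init__.py
+            models.py       # any models the benchmark needs (startup's is empty)
+            settings.py     # minimal settings installing this app
+            benchmark.py    # times the work and prints the result to stdout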
+
+Please write new benchmarks and send me pull requests on GitHub!
+
+TODO
+----
+
+* Right now the harness runs each benchmark multiple times in separate
+  processes, paying interpreter and Django startup costs on every run. The
+  startup benchmark shows this is a non-trivial amount of time, so there
+  needs to be a way for individual benchmarks to run n trials in-process to
+  avoid that overhead and warmup time. Unladen Swallow's ``perf.py`` supports
+  this; the harness code needs to as well (see the sketch at the end of this
+  file).
+
+* The number of trials is hard-coded. This should be a command-line option,
+  or, better yet, it could be determined automatically by running trials
+  until the results reach a particular confidence interval or some large
+  ceiling is hit.
+
+* Lots and lots and lots more benchmarks. Some ideas:
+
+ * template rendering (something useful, not the unladen one)
+ * ORM queries
+ * ORM overhead compared to cursor.execute()
+ * signal/dispatch
+ * datastructures (specifically MultiDict)
+ * url resolving and reversing
+ * form/model validation
+ * holistic request/response round-trip time
+ * middleware (piecemeal and standard "stacks" together)
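+
+As a rough sketch of the first TODO item above: an in-process benchmark could
+accept a trial count and print one timing per line for the harness to parse
+(hypothetical -- ``run_one_iteration`` is a stand-in for whatever the
+benchmark measures, and the harness doesn't read per-trial output yet)::
+
+    import sys
+    import time
+
+    def run_one_iteration():
+        # Stand-in for whatever the benchmark actually measures.
+        pass
+
+    trials = int(sys.argv[1]) if len(sys.argv) > 1 else 1
+    for _ in xrange(trials):
+        t0 = time.time()
+        run_one_iteration()
+        print time.time() - t0   # one timing per line on stdout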
0 benchmarks/startup/__init__.py (empty file)
10 benchmarks/startup/benchmark.py
@@ -0,0 +1,10 @@
+import time
+t1 = time.time()
+
+# Make sure the models and settings are loaded, then we're done.
+# Calling get_models() will make sure settings get loaded.
+
+from django.db import models
+models.get_models()
+
+print time.time() - t1
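+
+# The harness (djangobench.py) runs this script in a subprocess, once per
+# trial, with DJANGO_SETTINGS_MODULE set to 'startup.settings' and PYTHONPATH
+# pointing at either the control or the experiment Django tree, so the time
+# printed above is the cost of importing and setting up that Django.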
1 benchmarks/startup/models.py
@@ -0,0 +1 @@
+# Blank, yo.
7 benchmarks/startup/settings.py
@@ -0,0 +1,7 @@
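+# Minimal settings for the startup benchmark: install the app and point the
+# default database at an in-memory SQLite database.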
+INSTALLED_APPS = ['startup']
+DATABASES = {
+    'default': {
+        'ENGINE': 'django.db.backends.sqlite3',
+        'NAME': ':memory:',
+    }
+}
82 djangobench.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+
+"""
+Run us some Django benchmarks.
+"""
+
+import os
+import subprocess
+import tempfile
+
+import argparse
+from unipath import DIRS, FSPath as Path
+
+import perf
+
+BENCHMARK_DIR = Path(__file__).parent.child('benchmarks')
+
+def main(control, experiment, benchmark_dir=BENCHMARK_DIR):
+    # Calculate the subshell environments we'll use to execute the benchmarks
+    # in. The benchmark directory goes on PYTHONPATH so each benchmark's
+    # settings module can be imported; the *parent* of each Django checkout
+    # goes on PYTHONPATH so that ``import django`` resolves to the control or
+    # experiment tree inside the measured subprocess.
+    control_env = {
+        'PYTHONPATH': "%s:%s" % (Path(benchmark_dir).absolute(), Path(control).parent.absolute()),
+    }
+    experiment_env = {
+        'PYTHONPATH': "%s:%s" % (Path(benchmark_dir).absolute(), Path(experiment).parent.absolute()),
+    }
+
+    # TODO: make this configurable, or, better, make it an option
+    # to run until results are significant or some ceiling is hit.
+    trials = 5
+
+    results = []
+
+    for benchmark in discover_benchmarks(benchmark_dir):
+        print "Running '%s' benchmark ..." % benchmark.name
+        settings_mod = '%s.settings' % benchmark.name
+        control_env['DJANGO_SETTINGS_MODULE'] = settings_mod
+        experiment_env['DJANGO_SETTINGS_MODULE'] = settings_mod
+
+        control_data = perf.MeasureCommand(
+            ['python', '%s/benchmark.py' % benchmark],
+            iterations=trials,
+            env=control_env,
+            track_memory=False,
+        )
+
+        experiment_data = perf.MeasureCommand(
+            ['python', '%s/benchmark.py' % benchmark],
+            iterations=trials,
+            env=experiment_env,
+            track_memory=False,
+        )
+
+        options = argparse.Namespace(
+            track_memory=False,
+            diff_instrumentation=False,
+            benchmark_name=benchmark.name,
+            disable_timelines=True,
+        )
+        result = perf.CompareBenchmarkData(control_data, experiment_data, options)
+        print result
+        print
+
+def discover_benchmarks(benchmark_dir):
+    for app in Path(benchmark_dir).listdir(filter=DIRS):
+        if app.child('benchmark.py').exists() and app.child('settings.py').exists():
+            yield app
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--control',
+        default='django-control/django',
+        help="Path to the Django code tree to use as control.",
+    )
+    parser.add_argument(
+        '--experiment',
+        default='django-experiment/django',
+        help="Path to the Django version to use as experiment.",
+    )
+    args = parser.parse_args()
+    main(args.control, args.experiment)
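+
+# Example invocation (hypothetical paths):
+#
+#   ./djangobench.py --control=/path/to/1.2-checkout/django \
+#                    --experiment=/path/to/trunk-checkout/django
+#
+# Note that --control and --experiment point at the ``django`` package inside
+# each checkout; it's the parent directory that ends up on PYTHONPATH.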
2,042 perf.py
(diff not shown)

2 requirements.txt
(diff not shown)
