Skip to content
Browse files

added nodatabase test runner, including coverage option

  • Loading branch information...
1 parent 73a5d35 commit 431577290e4012a6a7ece905c77c2fe26b38fad1 @garethr garethr committed Sep 13, 2009
View
7 README.textile
@@ -25,6 +25,13 @@ If you want to know what code is being run when you run your test suite then cod
<pre>python manage.py test --coverage</pre>
<pre>python manage.py test --figleaf</pre>
+h2. No Database
+
+Sometimes you don't want the overhead of setting up a database during testing, probably because your application just doesn't use it.
+
+<pre>python manage.py test --nodb</pre>
+<pre>python manage.py test --nodb --coverage</pre>
+
h2. Licence
XMLUnit is included out of convenience. It was written by Marc-Elian Begin <Marc-Elian.Begin@cern.ch> and is Copyright (c) Members of the EGEE Collaboration. 2004. http://www.eu-egee.org
View
5 setup.py
@@ -2,9 +2,9 @@
setup(
name = "django-test-extensions",
- version = "0.4",
+ version = "0.5",
author = "Gareth Rushgrove",
- author_email = "gareth@morethanseven.net",
+ author_email = "gareth@morethanseven.net",
url = "http://github.com/garethr/django-test-extensions/",
packages = find_packages('src'),
@@ -15,6 +15,7 @@
install_requires=[
'setuptools',
'BeautifulSoup',
+ 'coverage',
],
classifiers = [
"Intended Audience :: Developers",
View
11 src/test_extensions/management/commands/test.py
@@ -18,6 +18,8 @@ class Command(BaseCommand):
help='Produce figleaf coverage report'),
make_option('--xml', action='store_true', dest='xml', default=False,
help='Produce xml output for cruise control'),
+ make_option('--nodb', action='store_true', dest='nodb', default=False,
+ help='No database required for these tests'),
)
help = """Custom test command which allows for
specifying different test runners."""
@@ -37,7 +39,12 @@ def handle(self, *test_labels, **options):
management.get_commands()
management._commands['syncdb'] = 'django.core'
- if options.get('coverage'):
+ if options.get('nodb'):
+ if options.get('coverage'):
+ test_runner_name = 'test_extensions.testrunners.nodatabase.run_tests_with_coverage'
+ else:
+ test_runner_name = 'test_extensions.testrunners.nodatabase.run_tests'
+ elif options.get('coverage'):
test_runner_name = 'test_extensions.testrunners.codecoverage.run_tests'
elif options.get('figleaf'):
test_runner_name = 'test_extensions.testrunners.figleafcoverage.run_tests'
@@ -56,6 +63,6 @@ def handle(self, *test_labels, **options):
test_runner = getattr(test_module, test_path[-1])
failures = test_runner(test_labels, verbosity=verbosity,
- interactive=interactive)
+ interactive=interactive)
if failures:
sys.exit(failures)
View
15 src/test_extensions/testrunners/codecoverage.py
@@ -6,6 +6,8 @@
from django.test.simple import run_tests as django_test_runner
from django.db.models import get_app, get_apps
+from nodatabase import run_tests as nodatabase_run_tests
+
def get_coverage_modules(app_module):
"""
Returns a list of modules to report coverage info for, given an
@@ -31,7 +33,8 @@ def get_all_coverage_modules(app_module):
mod_list = []
for root, dirs, files in os.walk(app_dirpath):
root_path = app_path + root[len(app_dirpath):].split(os.path.sep)[1:]
- if app_path[0] not in settings.EXCLUDE_FROM_COVERAGE:
+ excludes = getattr(settings, 'EXCLUDE_FROM_COVERAGE', [])
+ if app_path[0] not in excludes:
for file in files:
if file.lower().endswith('.py'):
mod_name = file[:-3].lower()
@@ -46,15 +49,19 @@ def get_all_coverage_modules(app_module):
return mod_list
def run_tests(test_labels, verbosity=1, interactive=True,
- extra_tests=[]):
+ extra_tests=[], nodatabase=False):
"""
Test runner which displays a code coverage report at the end of the
run.
"""
coverage.use_cache(0)
coverage.start()
- results = django_test_runner(test_labels, verbosity, interactive,
- extra_tests)
+ if nodatabase:
+ results = nodatabase_run_tests(test_labels, verbosity, interactive,
+ extra_tests)
+ else:
+ results = django_test_runner(test_labels, verbosity, interactive,
+ extra_tests)
coverage.stop()
coverage_modules = []
View
145 src/test_extensions/testrunners/nodatabase.py
@@ -0,0 +1,145 @@
+"""
+Test runner that doesn't use the database. Contributed by
+Bradley Wright <intranation.com>
+"""
+
+import os
+import unittest
+from glob import glob
+
+from django.test.utils import setup_test_environment, teardown_test_environment
+from django.conf import settings
+from django.test.simple import *
+
+import coverage
+
def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=None):
    """
    Run the unit tests for all the test labels in the provided list
    without creating a test database.

    Labels must be of the form:
     - app.TestClass.test_method
        Run a single specific test method
     - app.TestClass
        Run all the test methods in a given class
     - app
        Search for doctests and unittests in the named application.

    When looking for tests, the test runner will look in the models and
    tests modules for the application.

    A list of 'extra' tests may also be provided; these tests
    will be added to the test suite.

    Returns the number of tests that failed (failures + errors), which
    the calling management command uses as the process exit status.
    """
    # Avoid the shared-mutable-default pitfall: a list default would be
    # shared across calls and could accumulate tests.
    if extra_tests is None:
        extra_tests = []

    setup_test_environment()

    settings.DEBUG = False
    suite = unittest.TestSuite()

    # if passed a list of tests...
    if test_labels:
        for label in test_labels:
            if '.' in label:
                # "app.TestClass" or "app.TestClass.test_method"
                suite.addTest(build_test(label))
            else:
                app = get_app(label)
                suite.addTest(build_suite(app))
    # ...otherwise use all installed
    else:
        for app in get_apps():
            # skip Django's own contrib apps: they require a database,
            # which this runner deliberately never sets up
            if not app.__name__.startswith('django'):
                suite.addTest(build_suite(app))

    for test in extra_tests:
        suite.addTest(test)

    result = unittest.TextTestRunner(verbosity=verbosity).run(suite)

    teardown_test_environment()

    return len(result.failures) + len(result.errors)
+
+def run_tests_with_coverage(test_labels, verbosity=1, interactive=True, extra_tests=[]):
+ """
+ Run the unit tests for all the test labels in the provided list.
+ Labels must be of the form:
+ - app.TestClass.test_method
+ Run a single specific test method
+ - app.TestClass
+ Run all the test methods in a given class
+ - app
+ Search for doctests and unittests in the named application.
+
+ When looking for tests, the test runner will look in the models and
+ tests modules for the application.
+
+ A list of 'extra' tests may also be provided; these tests
+ will be added to the test suite.
+
+ Returns the number of tests that failed.
+ """
+ setup_test_environment()
+
+ settings.DEBUG = False
+ suite = unittest.TestSuite()
+
+ modules_to_cover = []
+
+ # start doing some coverage action
+ coverage.erase()
+ coverage.start()
+
+ # if passed a list of tests...
+ if test_labels:
+ for label in test_labels:
+ if '.' in label:
+ suite.addTest(build_test(label))
+ else:
+ app = get_app(label)
+ suite.addTest(build_suite(app))
+ # ...otherwise use all installed
+ else:
+ for app in get_apps():
+ # skip apps named "Django" because they use a database
+ if not app.__name__.startswith('django'):
+ # get the actual app name
+ app_name = app.__name__.replace('.models', '')
+ # get a list of the files inside that module
+ files = glob('%s/*.py' % app_name)
+ # remove models because we don't use them, stupid
+ new_files = [i for i in files if not i.endswith('models.py')]
+ modules_to_cover.extend(new_files)
+ # actually test the file
+ suite.addTest(build_suite(app))
+
+ for test in extra_tests:
+ suite.addTest(test)
+
+ result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
+
+ teardown_test_environment()
+
+ # stop coverage
+ coverage.stop()
+
+ # output results
+ print ''
+ print '--------------------------'
+ print 'Unit test coverage results'
+ print '--------------------------'
+ print ''
+ coverage.report(modules_to_cover, show_missing=1)
+
+ return len(result.failures) + len(result.errors)

0 comments on commit 4315772

Please sign in to comment.
Something went wrong with that request. Please try again.