Skip to content
Permalink

Comparing changes

Choose two branches to see what’s changed or to start a new pull request. If you need to, you can also compare across forks or learn more about diff comparisons.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks. Learn more about diff comparisons here.
base repository: ask/django-test-extensions
Failed to load repositories. Confirm that selected base ref is valid, then try again.
Loading
base: master
Choose a base ref
...
head repository: sverrejoh/django-test-extensions
Failed to load repositories. Confirm that selected head ref is valid, then try again.
Loading
compare: master
Choose a head ref
Can’t automatically merge. Don’t worry, you can still create the pull request.
  • 4 commits
  • 5 files changed
  • 1 contributor

Commits on Feb 18, 2010

  1. Whitespace cleanup.

    sverrejoh committed Feb 18, 2010
    Copy the full SHA
    d5cc055 View commit details
  2. Whitespace cleanup.

    sverrejoh committed Feb 18, 2010
    Copy the full SHA
    bcd208a View commit details
  3. Copy the full SHA
    dac0902 View commit details
  4. Missing option

    sverrejoh committed Feb 18, 2010
    Copy the full SHA
    3f8172c View commit details
3 changes: 2 additions & 1 deletion src/test_extensions/management/commands/test.py
Original file line number Diff line number Diff line change
@@ -49,7 +49,8 @@ def handle(self, *test_labels, **options):

verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive', True)

failfast = options.get('failfast', False)

# it's quite possible someone, lets say South, might have stolen
# the syncdb command from django. For testing purposes we should
# probably put it back. Migrations don't really make sense
6 changes: 3 additions & 3 deletions src/test_extensions/testrunners/codecoverage.py
Original file line number Diff line number Diff line change
@@ -64,7 +64,7 @@ def get_all_coverage_modules(app_module):

return mod_list

def run_tests(test_labels, verbosity=1, interactive=True,
def run_tests(test_labels, verbosity=1, interactive=True, failfast=False,
extra_tests=[], nodatabase=False):
"""
Test runner which displays a code coverage report at the end of the
@@ -77,12 +77,12 @@ def run_tests(test_labels, verbosity=1, interactive=True,
cov.start()
if nodatabase:
results = nodatabase_run_tests(test_labels, verbosity, interactive,
extra_tests)
failfast, extra_tests)
else:
from django.db import connection
connection.creation.destroy_test_db = lambda *a, **k: None
results = django_test_runner(test_labels, verbosity, interactive,
extra_tests)
failfast, extra_tests)
cov.stop()

coverage_modules = []
10 changes: 6 additions & 4 deletions src/test_extensions/testrunners/figleafcoverage.py
Original file line number Diff line number Diff line change
@@ -5,15 +5,17 @@
from django.test.simple import run_tests as django_test_runner

import figleaf

def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):

def run_tests(test_labels, verbosity=1, interactive=True, failfast=False,
extra_tests=[]):
setup_test_environment()
figleaf.start()
test_results = django_test_runner(test_labels, verbosity, interactive, extra_tests)
test_results = django_test_runner(test_labels, verbosity, interactive,
failfast, extra_tests)
figleaf.stop()
if not os.path.isdir(os.path.join("temp", "figleaf")): os.mkdir(os.path.join("temp", "figleaf"))
file_name = "temp/figleaf/test_output.figleaf"
figleaf.write_coverage(file_name)
output = commands.getoutput("figleaf2html " + file_name + " --output-directory=temp/figleaf")
print output
return test_results
return test_results
41 changes: 23 additions & 18 deletions src/test_extensions/testrunners/nodatabase.py
Original file line number Diff line number Diff line change
@@ -2,18 +2,19 @@
Test runner that doesn't use the database. Contributed by
Bradley Wright <intranation.com>
"""

import os
import unittest
from glob import glob

from django.test.utils import setup_test_environment, teardown_test_environment
from django.conf import settings
from django.test.simple import *

import coverage

def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):

def run_tests(test_labels, verbosity=1, interactive=True, failfast=False,
extra_tests=[]):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
@@ -23,22 +24,22 @@ def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
setup_test_environment()

settings.DEBUG = False
suite = unittest.TestSuite()

modules_to_cover = []

# if passed a list of tests...
if test_labels:
for label in test_labels:
@@ -47,6 +48,9 @@ def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
else:
app = get_app(label)
suite.addTest(build_suite(app))
# Use settings if defined
elif settings.TEST_APPS:
test_labels = settings.TEST_APPS
# ...otherwise use all installed
else:
for app in get_apps():
@@ -61,17 +65,18 @@ def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
modules_to_cover.extend(new_files)
# actually test the file
suite.addTest(build_suite(app))

for test in extra_tests:
suite.addTest(test)

result = unittest.TextTestRunner(verbosity=verbosity).run(suite)

teardown_test_environment()

return len(result.failures) + len(result.errors)

def run_tests_with_coverage(test_labels, verbosity=1, interactive=True, extra_tests=[]):

def run_tests_with_coverage(test_labels, verbosity=1, interactive=True,
failfast=False, extra_tests=[]):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
@@ -142,4 +147,4 @@ def run_tests_with_coverage(test_labels, verbosity=1, interactive=True, extra_te
print ''
coverage.report(modules_to_cover, show_missing=1)

return len(result.failures) + len(result.errors)
return len(result.failures) + len(result.errors)
17 changes: 9 additions & 8 deletions src/test_extensions/testrunners/xmloutput.py
Original file line number Diff line number Diff line change
@@ -6,12 +6,13 @@
from django.test.simple import *
from django.utils.html import escape

def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
def run_tests(test_labels, verbosity=1, interactive=True, failfast=False,
extra_tests=[]):
setup_test_environment()
settings.DEBUG = False

settings.DEBUG = False
suite = unittest.TestSuite()

if test_labels:
for label in test_labels:
if '.' in label:
@@ -22,7 +23,7 @@ def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
else:
for app in get_apps():
suite.addTest(build_suite(app))

for test in extra_tests:
suite.addTest(test)

@@ -31,9 +32,9 @@ def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
connection.creation.create_test_db(verbosity, autoclobber=not interactive)
result = XMLTestRunner(verbosity=verbosity).run(suite)
connection.creation.destroy_test_db(old_name, verbosity)

teardown_test_environment()

return len(result.failures) + len(result.errors)


@@ -117,4 +118,4 @@ def printErrors(self):
pass #assert False

def printErrorList(self, flavour, errors):
assert False
assert False