Skip to content
This repository

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse code

Fixed #506 -- runtests.py now allows models to be tested individually. Thanks, Simon

git-svn-id: http://code.djangoproject.com/svn/django/trunk@646 bcc190cf-cafb-0310-a4f2-bffc1f526a37
  • Loading branch information...
commit 09bd9d3ef6f1b843a58859698c2d8587461d5de6 1 parent bcc2873
Adrian Holovaty authored September 19, 2005

Showing 1 changed file with 42 additions and 28 deletions. Show diff stats Hide diff stats

  1. 70  tests/runtests.py
70  tests/runtests.py
@@ -42,7 +42,7 @@ def report_unexpected_exception(self, out, test, example, exc_info):
42 42
         tb = ''.join(traceback.format_exception(*exc_info)[1:])
43 43
         log_error(test.name, "API test raised an exception",
44 44
             "Code: %r\nLine: %s\nException: %s" % (example.source.strip(), example.lineno, tb))
45  
-            
  45
+
46 46
 class DjangoDoctestOutputChecker(doctest.OutputChecker):
47 47
     def check_output(self, want, got, optionflags):
48 48
         ok = doctest.OutputChecker.check_output(self, want, got, optionflags)
@@ -54,8 +54,9 @@ def check_output(self, want, got, optionflags):
54 54
         return ok
55 55
 
56 56
 class TestRunner:
57  
-    def __init__(self, verbosity_level=0):
  57
+    def __init__(self, verbosity_level=0, which_tests=None):
58 58
         self.verbosity_level = verbosity_level
  59
+        self.which_tests = which_tests
59 60
 
60 61
     def output(self, required_level, message):
61 62
         if self.verbosity_level > required_level - 1:
@@ -66,11 +67,22 @@ def run_tests(self):
66 67
         from django.core.db import db
67 68
         from django.core import management, meta
68 69
 
69  
-        self.output(0, "Running tests with database %r" % settings.DATABASE_ENGINE)
70  
-
71 70
         # Manually set INSTALLED_APPS to point to the test app.
72 71
         settings.INSTALLED_APPS = (APP_NAME,)
73 72
 
  73
+        # Determine which models we're going to test.
  74
+        test_models = get_test_models()
  75
+        if self.which_tests:
  76
+            # Only run the specified tests.
  77
+            bad_models = [m for m in self.which_tests if m not in test_models]
  78
+            if bad_models:
  79
+                sys.stderr.write("Models not found: %s\n" % bad_models)
  80
+                sys.exit(1)
  81
+            else:
  82
+                test_models = self.which_tests
  83
+
  84
+        self.output(0, "Running tests with database %r" % settings.DATABASE_ENGINE)
  85
+
74 86
         # If we're using SQLite, it's more convenient to test against an
75 87
         # in-memory database.
76 88
         if settings.DATABASE_ENGINE == "sqlite3":
@@ -107,7 +119,7 @@ def run_tests(self):
107 119
 
108 120
         # Run the tests for each test model.
109 121
         self.output(1, "Running app tests")
110  
-        for model_name in get_test_models():
  122
+        for model_name in test_models:
111 123
             self.output(1, "%s model: Importing" % model_name)
112 124
             try:
113 125
                 mod = meta.get_app(model_name)
@@ -132,30 +144,31 @@ def run_tests(self):
132 144
                 # side effects on other tests.
133 145
                 db.rollback()
134 146
 
135  
-        # Run the non-model tests in the other tests dir
136  
-        self.output(1, "Running other tests")
137  
-        other_tests_dir = os.path.join(os.path.dirname(__file__), OTHER_TESTS_DIR)
138  
-        test_modules = [f[:-3] for f in os.listdir(other_tests_dir) if f.endswith('.py') and not f.startswith('__init__')]
139  
-        for module in test_modules:
140  
-            self.output(1, "%s module: Importing" % module)
141  
-            try:
142  
-                mod = __import__("othertests." + module, '', '', [''])
143  
-            except Exception, e:
144  
-                log_error(module, "Error while importing", ''.join(traceback.format_exception(*sys.exc_info())[1:]))
145  
-                continue
146  
-            if mod.__doc__:
147  
-                p = doctest.DocTestParser()
148  
-                dtest = p.get_doctest(mod.__doc__, mod.__dict__, module, None, None)
149  
-                runner = DjangoDoctestRunner(verbosity_level=verbosity_level, verbose=False)
150  
-                self.output(1, "%s module: runing tests" % module)
151  
-                runner.run(dtest, clear_globs=True, out=sys.stdout.write)
152  
-            if hasattr(mod, "run_tests") and callable(mod.run_tests):
153  
-                self.output(1, "%s module: runing tests" % module)
  147
+        if not self.which_tests:
  148
+            # Run the non-model tests in the other tests dir
  149
+            self.output(1, "Running other tests")
  150
+            other_tests_dir = os.path.join(os.path.dirname(__file__), OTHER_TESTS_DIR)
  151
+            test_modules = [f[:-3] for f in os.listdir(other_tests_dir) if f.endswith('.py') and not f.startswith('__init__')]
  152
+            for module in test_modules:
  153
+                self.output(1, "%s module: Importing" % module)
154 154
                 try:
155  
-                    mod.run_tests(verbosity_level)
  155
+                    mod = __import__("othertests." + module, '', '', [''])
156 156
                 except Exception, e:
157  
-                    log_error(module, "Exception running tests", ''.join(traceback.format_exception(*sys.exc_info())[1:]))
  157
+                    log_error(module, "Error while importing", ''.join(traceback.format_exception(*sys.exc_info())[1:]))
158 158
                     continue
  159
+                if mod.__doc__:
  160
+                    p = doctest.DocTestParser()
  161
+                    dtest = p.get_doctest(mod.__doc__, mod.__dict__, module, None, None)
  162
+                    runner = DjangoDoctestRunner(verbosity_level=verbosity_level, verbose=False)
  163
+                    self.output(1, "%s module: running tests" % module)
  164
+                    runner.run(dtest, clear_globs=True, out=sys.stdout.write)
  165
+                if hasattr(mod, "run_tests") and callable(mod.run_tests):
  166
+                    self.output(1, "%s module: running tests" % module)
  167
+                    try:
  168
+                        mod.run_tests(verbosity_level)
  169
+                    except Exception, e:
  170
+                        log_error(module, "Exception running tests", ''.join(traceback.format_exception(*sys.exc_info())[1:]))
  171
+                        continue
159 172
 
160 173
         # Unless we're using SQLite, remove the test database to clean up after
161 174
         # ourselves. Connect to the previous database (not the test database)
@@ -176,17 +189,18 @@ def run_tests(self):
176 189
 
177 190
         # Display output.
178 191
         if error_list:
179  
-            print "Got %s error%s:" % (len(error_list), len(error_list) != 1 and 's' or '')
180 192
             for d in error_list:
181 193
                 print
182 194
                 print d['title']
183 195
                 print "=" * len(d['title'])
184 196
                 print d['description']
  197
+            print "%s error%s:" % (len(error_list), len(error_list) != 1 and 's' or '')
185 198
         else:
186 199
             print "All tests passed."
187 200
 
188 201
 if __name__ == "__main__":
189 202
     from optparse import OptionParser
  203
+    usage = "%prog [options] [model model model ...]"
190 204
     parser = OptionParser()
191 205
     parser.add_option('-v', help='How verbose should the output be? Choices are 0, 1 and 2, where 2 is most verbose. Default is 0.',
192 206
         type='choice', choices=['0', '1', '2'])
@@ -198,5 +212,5 @@ def run_tests(self):
198 212
         verbosity_level = int(options.v)
199 213
     if options.settings:
200 214
         os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
201  
-    t = TestRunner(verbosity_level)
  215
+    t = TestRunner(verbosity_level, args)
202 216
     t.run_tests()

0 notes on commit 09bd9d3

Please sign in to comment.
Something went wrong with that request. Please try again.