This repository has been archived by the owner on Jan 30, 2023. It is now read-only.
-
-
Notifications
You must be signed in to change notification settings - Fork 7
/
control.py
1314 lines (1130 loc) · 49.1 KB
/
control.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
Classes involved in doctesting
This module controls the various classes involved in doctesting.
AUTHORS:
- David Roe (2012-03-27) -- initial version, based on Robert Bradshaw's code.
"""
# ****************************************************************************
# Copyright (C) 2012 David Roe <roed.math@gmail.com>
# Robert Bradshaw <robertwb@gmail.com>
# William Stein <wstein@gmail.com>
# Copyright (C) 2016 Jeroen Demeyer <jdemeyer@cage.ugent.be>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import absolute_import, division, print_function
import random
import os
import sys
import time
import json
import re
import types
import sage.misc.flatten
from sage.structure.sage_object import SageObject
from sage.env import DOT_SAGE, SAGE_LIB, SAGE_SRC, SAGE_LOCAL, SAGE_EXTCODE
from sage.misc.temporary_file import tmp_dir
from cysignals.signals import AlarmInterrupt, init_cysignals
from .sources import FileDocTestSource, DictAsObject
from .forker import DocTestDispatcher
from .reporting import DocTestReporter
from .util import Timer, count_noun, dict_difference
from .external import external_software, available_software
from sage.features import PythonModule
nodoctest_regex = re.compile(r'\s*(#+|%+|r"+|"+|\.\.)\s*nodoctest')
optionaltag_regex = re.compile(r'^\w+$')
# Optional tags which are always automatically added
auto_optional_tags = set(['py3'])
class DocTestDefaults(SageObject):
    """
    This class is used for doctesting the Sage doctest module.

    It fills in attributes to be the same as the defaults defined in
    ``sage-runtests``, except for a few places,
    which is mostly to make doctesting more predictable.

    EXAMPLES::

        sage: from sage.doctest.control import DocTestDefaults
        sage: D = DocTestDefaults()
        sage: D
        DocTestDefaults()
        sage: D.timeout
        -1

    Keyword arguments become attributes::

        sage: D = DocTestDefaults(timeout=100)
        sage: D
        DocTestDefaults(timeout=100)
        sage: D.timeout
        100
    """
    def __init__(self, **kwds):
        """
        Edit these parameters after creating an instance.

        EXAMPLES::

            sage: from sage.doctest.control import DocTestDefaults
            sage: D = DocTestDefaults()
            sage: 'sage' in D.optional
            True
        """
        # NOTE that these are NOT the defaults used by the sage-runtests
        # script (which is what gets invoked when running `sage -t`).
        # These are only basic defaults when invoking the doctest runner
        # from Python, which is not the typical use case.
        self.nthreads = 1
        self.serial = False
        self.timeout = -1
        self.memlimit = 0
        self.all = False
        self.logfile = None
        self.sagenb = False
        self.long = False
        self.warn_long = None
        self.randorder = None
        self.random_seed = 0
        self.global_iterations = 1  # sage-runtests default is 0
        self.file_iterations = 1    # sage-runtests default is 0
        self.initial = False
        self.exitfirst = False
        self.force_lib = False
        self.abspath = True         # sage-runtests default is False
        self.verbose = False
        self.debug = False
        self.only_errors = False
        self.gdb = False
        self.valgrind = False
        self.massif = False
        self.cachegrind = False
        self.omega = False
        self.failed = False
        self.new = False
        self.show_skipped = False
        self.target_walltime = None

        # sage-runtests contains more optional tags. Technically, adding
        # auto_optional_tags here is redundant, since that is added
        # automatically anyway. However, this default is still used for
        # displaying user-defined optional tags and we don't want to see
        # the auto_optional_tags there.
        self.optional = set(['sage']) | auto_optional_tags

        # > 0: always run GC before every test
        # < 0: disable GC
        self.gc = 0

        # We don't want to use the real stats file by default so that
        # we don't overwrite timings for the actual running doctests.
        self.stats_path = os.path.join(DOT_SAGE, "timings_dt_test.json")
        self.__dict__.update(kwds)

    def _repr_(self):
        """
        Return the print representation.

        Only attributes differing from a freshly-constructed instance
        are shown, sorted by name.

        EXAMPLES::

            sage: from sage.doctest.control import DocTestDefaults
            sage: DocTestDefaults(timeout=100, foobar="hello")
            DocTestDefaults(foobar='hello', timeout=100)
        """
        diff = dict_difference(self.__dict__, DocTestDefaults().__dict__)
        return "DocTestDefaults(%s)" % ", ".join(
            str(k) + "=" + repr(getattr(self, k)) for k in sorted(diff.keys()))

    def __eq__(self, other):
        """
        Comparison by __dict__.

        EXAMPLES::

            sage: from sage.doctest.control import DocTestDefaults
            sage: DD1 = DocTestDefaults(long=True)
            sage: DD2 = DocTestDefaults(long=True)
            sage: DD1 == DD2
            True
        """
        if not isinstance(other, DocTestDefaults):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Test for unequality.

        EXAMPLES::

            sage: from sage.doctest.control import DocTestDefaults
            sage: DD1 = DocTestDefaults(long=True)
            sage: DD2 = DocTestDefaults(long=True)
            sage: DD1 != DD2
            False
        """
        return not (self == other)
def skipdir(dirname):
    """
    Return True if and only if the directory ``dirname`` should not be
    doctested.

    A directory is skipped when it contains a ``nodoctest`` marker,
    either as a Python file ``nodoctest.py`` or as a bare ``nodoctest``
    file.

    EXAMPLES::

        sage: from sage.doctest.control import skipdir
        sage: skipdir(sage.env.SAGE_SRC)
        False
        sage: skipdir(os.path.join(sage.env.SAGE_SRC, "sage", "doctest", "tests"))
        True
    """
    markers = ("nodoctest.py", "nodoctest")
    return any(os.path.exists(os.path.join(dirname, marker))
               for marker in markers)
def skipfile(filename):
    """
    Return True if and only if the file ``filename`` should not be
    doctested.

    A file is skipped when its extension is not one of the doctested
    kinds, or when one of its first 10 lines carries a ``nodoctest``
    marker.

    EXAMPLES::

        sage: from sage.doctest.control import skipfile
        sage: skipfile("skipme.c")
        True
        sage: filename = tmp_filename(ext=".pyx")
        sage: skipfile(filename)
        False
        sage: with open(filename, "w") as f:
        ....:     _ = f.write("# nodoctest")
        sage: skipfile(filename)
        True
    """
    _, ext = os.path.splitext(filename)
    if ext not in ('.py', '.pyx', '.pxd', '.pxi', '.sage', '.spyx', '.rst', '.tex'):
        return True
    # Only the first 10 lines are scanned for the marker.
    with open(filename) as F:
        for index, line in enumerate(F):
            if nodoctest_regex.match(line):
                return True
            if index >= 9:
                break
    return False
class Logger(object):
    r"""
    File-like object which implements writing to multiple files at
    once.

    EXAMPLES::

        sage: from sage.doctest.control import Logger
        sage: with open(tmp_filename(), "w+") as t:
        ....:     L = Logger(sys.stdout, t)
        ....:     _ = L.write("hello world\n")
        ....:     _ = t.seek(0)
        ....:     t.read()
        hello world
        'hello world\n'
    """
    def __init__(self, *files):
        """
        Initialize the logger for writing to all files in ``files``.

        TESTS::

            sage: from sage.doctest.control import Logger
            sage: Logger().write("hello world\n")  # no-op
        """
        self.files = list(files)

    def write(self, x):
        r"""
        Write ``x`` to all files.

        TESTS::

            sage: from sage.doctest.control import Logger
            sage: Logger(sys.stdout).write("hello world\n")
            hello world
        """
        for output in self.files:
            output.write(x)

    def flush(self):
        """
        Flush all files.

        TESTS::

            sage: from sage.doctest.control import Logger
            sage: Logger(sys.stdout).flush()
        """
        for output in self.files:
            output.flush()
class DocTestController(SageObject):
"""
This class controls doctesting of files.
After creating it with appropriate options, call the :meth:`run` method to run the doctests.
"""
    def __init__(self, options, args):
        """
        Initialization.

        INPUT:

        - options -- either options generated from the command line by sage-runtests
          or a DocTestDefaults object (possibly with some entries modified)
        - args -- a list of filenames to doctest

        EXAMPLES::

            sage: from sage.doctest.control import DocTestDefaults, DocTestController
            sage: DC = DocTestController(DocTestDefaults(), [])
            sage: DC
            DocTest Controller
        """
        # First we modify options to take environment variables into
        # account and check compatibility of the user's specified
        # options.
        if options.timeout < 0:
            # A negative timeout means "pick a default for me".
            if options.gdb or options.debug:
                # Interactive debuggers: "infinite" timeout
                options.timeout = 0
            elif options.valgrind or options.massif or options.cachegrind or options.omega:
                # Non-interactive debuggers: 48 hours
                options.timeout = int(os.getenv('SAGE_TIMEOUT_VALGRIND', 48 * 60 * 60))
            elif options.long:
                options.timeout = int(os.getenv('SAGE_TIMEOUT_LONG', 30 * 60))
            else:
                options.timeout = int(os.getenv('SAGE_TIMEOUT', 5 * 60))
            # For non-default GC options, double the timeout
            if options.gc:
                options.timeout *= 2
        if options.nthreads == 0:
            # 0 means "use the parallel thread count from the environment".
            options.nthreads = int(os.getenv('SAGE_NUM_THREADS_PARALLEL',1))
        if options.failed and not (args or options.new or options.sagenb):
            # If the user doesn't specify any files then we rerun all failed files.
            options.all = True
        if options.global_iterations == 0:
            options.global_iterations = int(os.environ.get('SAGE_TEST_GLOBAL_ITER', 1))
        if options.file_iterations == 0:
            options.file_iterations = int(os.environ.get('SAGE_TEST_ITER', 1))

        if options.debug:
            # Debugging needs direct control of stdin/stdout, so it forces
            # serial operation and disables the logfile.
            if options.nthreads > 1:
                print("Debugging requires single-threaded operation, setting number of threads to 1.")
            if options.logfile:
                print("Debugging is not compatible with logging, disabling logfile.")
            options.serial = True
            options.logfile = None
        if options.serial:
            options.nthreads = 1

        if options.verbose:
            options.show_skipped = True

        if isinstance(options.optional, str):
            # Normalize a comma-separated string of tags into a set
            # (or the special value True meaning "all optional tests").
            s = options.optional.lower()
            options.optional = set(s.split(','))
            if "all" in options.optional:
                # Special case to run all optional tests
                options.optional = True
            else:
                # We replace the 'optional' tag by all optional
                # packages for which the installed version matches the
                # latest available version (this implies in particular
                # that the package is actually installed).
                if 'optional' in options.optional:
                    options.optional.discard('optional')
                    from sage.misc.package import list_packages
                    for pkg in list_packages('optional', local=True).values():
                        if pkg['installed'] and pkg['installed_version'] == pkg['remote_version']:
                            options.optional.add(pkg['name'])

                # Check that all tags are valid
                for o in options.optional:
                    if not optionaltag_regex.search(o):
                        raise ValueError('invalid optional tag {!r}'.format(o))

                options.optional |= auto_optional_tags

        self.options = options

        if options.memlimit > 0:
            # Allow tests that require a virtual memory limit to be set
            options.optional.add('memlimit')

        self.files = args
        if options.logfile:
            try:
                self.logfile = open(options.logfile, 'a')
            except IOError:
                # Fall back to no logging rather than aborting the run.
                print("Unable to open logfile {!r}\nProceeding without logging.".format(options.logfile))
                self.logfile = None
        else:
            self.logfile = None

        # Flush any diagnostic messages we just printed
        sys.stdout.flush()
        sys.stderr.flush()

        # In serial mode, we run just one process. Then the doctests
        # will interfere with the output logging (both use stdout).
        # To solve this, we create real_stdout which will always
        # write to the actual standard output, regardless of
        # redirections.
        if options.serial:
            self._real_stdout = os.fdopen(os.dup(sys.stdout.fileno()), "w")
            self._close_stdout = True
        else:
            # Parallel mode: no special tricks needed
            self._real_stdout = sys.stdout
            self._close_stdout = False

        if self.logfile is None:
            self.logger = self._real_stdout
        else:
            # Mirror everything to both the terminal and the logfile.
            self.logger = Logger(self._real_stdout, self.logfile)

        self.stats = {}
        self.load_stats(options.stats_path)
        self._init_warn_long()

        if self.options.random_seed is None:
            self.options.random_seed = 0
def __del__(self):
if getattr(self, 'logfile', None) is not None:
self.logfile.close()
if getattr(self, '_close_stdout', False):
self._real_stdout.close()
def _init_warn_long(self):
"""
Pick a suitable default for the ``--warn-long`` option if not specified.
It is desirable to have all tests (even ``# long`` ones)
finish in less than about 5 seconds. Longer tests typically
don't add coverage, they just make testing slow.
The default used here is 60 seconds on a modern computer. It
should eventually be lowered to 5 seconds, but its best to
boil the frog slowly.
The stored timings are used to adjust this limit according to
the machine running the tests.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(), [])
sage: DC.options.warn_long = 5.0
sage: DC._init_warn_long()
sage: DC.options.warn_long # existing command-line options are not changed
5.00000000000000
"""
if self.options.warn_long is not None: # Specified on the command line
return
try:
self.options.warn_long = 60.0 * self.second_on_modern_computer()
except RuntimeError as err:
if not sage.doctest.DOCTEST_MODE:
print(err) # No usable timing information
def second_on_modern_computer(self):
"""
Return the wall time equivalent of a second on a modern computer.
OUTPUT:
Float. The wall time on your computer that would be equivalent
to one second on a modern computer. Unless you have kick-ass
hardware this should always be >= 1.0. Raises a
``RuntimeError`` if there are no stored timings to use as
benchmark.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(), [])
sage: DC.second_on_modern_computer() # not tested
"""
if len(self.stats) == 0:
raise RuntimeError('no stored timings available')
success = []
failed = []
for mod in self.stats.values():
if mod.get('failed', False):
failed.append(mod['walltime'])
else:
success.append(mod['walltime'])
if len(success) < 2500:
raise RuntimeError('too few successful tests, not using stored timings')
if len(failed) > 20:
raise RuntimeError('too many failed tests, not using stored timings')
expected = 12800.0 # Core i7 Quad-Core 2014
return sum(success) / expected
    def _repr_(self):
        """
        String representation.

        EXAMPLES::

            sage: from sage.doctest.control import DocTestDefaults, DocTestController
            sage: DC = DocTestController(DocTestDefaults(), [])
            sage: repr(DC) # indirect doctest
            'DocTest Controller'
        """
        return "DocTest Controller"
def load_stats(self, filename):
"""
Load stats from the most recent run(s).
Stats are stored as a JSON file, and include information on
which files failed tests and the walltime used for execution
of the doctests.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(), [])
sage: import json
sage: filename = tmp_filename()
sage: with open(filename, 'w') as stats_file:
....: json.dump({'sage.doctest.control':{'walltime':1.0r}}, stats_file)
sage: DC.load_stats(filename)
sage: DC.stats['sage.doctest.control']
{u'walltime': 1.0}
If the file doesn't exist, nothing happens. If there is an
error, print a message. In any case, leave the stats alone::
sage: d = tmp_dir()
sage: DC.load_stats(os.path.join(d)) # Cannot read a directory
Error loading stats from ...
sage: DC.load_stats(os.path.join(d, "no_such_file"))
sage: DC.stats['sage.doctest.control']
{u'walltime': 1.0}
"""
# Simply ignore non-existing files
if not os.path.exists(filename):
return
try:
with open(filename) as stats_file:
self.stats.update(json.load(stats_file))
except Exception:
self.log("Error loading stats from %s"%filename)
def save_stats(self, filename):
"""
Save stats from the most recent run as a JSON file.
WARNING: This function overwrites the file.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(), [])
sage: DC.stats['sage.doctest.control'] = {'walltime':1.0r}
sage: filename = tmp_filename()
sage: DC.save_stats(filename)
sage: import json
sage: with open(filename) as f:
....: D = json.load(f)
sage: D['sage.doctest.control']
{u'walltime': 1.0}
"""
from sage.misc.temporary_file import atomic_write
with atomic_write(filename) as stats_file:
json.dump(self.stats, stats_file)
def log(self, s, end="\n"):
"""
Log the string ``s + end`` (where ``end`` is a newline by default)
to the logfile and print it to the standard output.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DD = DocTestDefaults(logfile=tmp_filename())
sage: DC = DocTestController(DD, [])
sage: DC.log("hello world")
hello world
sage: DC.logfile.close()
sage: with open(DD.logfile) as f:
....: print(f.read())
hello world
In serial mode, check that logging works even if ``stdout`` is
redirected::
sage: DD = DocTestDefaults(logfile=tmp_filename(), serial=True)
sage: DC = DocTestController(DD, [])
sage: from sage.doctest.forker import SageSpoofInOut
sage: with open(os.devnull, 'w') as devnull:
....: S = SageSpoofInOut(devnull)
....: S.start_spoofing()
....: DC.log("hello world")
....: S.stop_spoofing()
hello world
sage: DC.logfile.close()
sage: with open(DD.logfile) as f:
....: print(f.read())
hello world
Check that no duplicate logs appear, even when forking (:trac:`15244`)::
sage: DD = DocTestDefaults(logfile=tmp_filename())
sage: DC = DocTestController(DD, [])
sage: DC.log("hello world")
hello world
sage: if os.fork() == 0:
....: DC.logfile.close()
....: os._exit(0)
sage: DC.logfile.close()
sage: with open(DD.logfile) as f:
....: print(f.read())
hello world
"""
self.logger.write(s + end)
self.logger.flush()
def test_safe_directory(self, dir=None):
"""
Test that the given directory is safe to run Python code from.
TESTS::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DD = DocTestDefaults()
sage: DC = DocTestController(DD, [])
sage: DC.test_safe_directory()
sage: d = os.path.join(tmp_dir(), "test")
sage: os.mkdir(d)
sage: os.chmod(d, 0o777)
sage: DC.test_safe_directory(d)
Traceback (most recent call last):
...
RuntimeError: refusing to run doctests...
"""
import stat
is_world_writeable = bool(os.stat(dir or os.getcwd()).st_mode & stat.S_IWOTH)
if is_world_writeable:
raise RuntimeError(
"refusing to run doctests from the current "
"directory '{}' since untrusted users could put files in "
"this directory, making it unsafe to run Sage code from"
.format(os.getcwd()))
def create_run_id(self):
"""
Creates the run id.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(), [])
sage: DC.create_run_id()
Running doctests with ID ...
"""
self.run_id = time.strftime('%Y-%m-%d-%H-%M-%S-') + "%08x" % random.getrandbits(32)
self.log("Running doctests with ID %s."%self.run_id)
    def add_files(self):
        r"""
        Checks for the flags '--all', '--new' and '--sagenb'.

        For each one present, this function adds the appropriate directories and files to the todo list.

        EXAMPLES::

            sage: from sage.doctest.control import DocTestDefaults, DocTestController
            sage: from sage.env import SAGE_SRC
            sage: import os
            sage: log_location = os.path.join(SAGE_TMP, 'control_dt_log.log')
            sage: DD = DocTestDefaults(all=True, logfile=log_location)
            sage: DC = DocTestController(DD, [])
            sage: DC.add_files()
            Doctesting entire Sage library.
            sage: os.path.join(SAGE_SRC, 'sage') in DC.files
            True

        ::

            sage: DD = DocTestDefaults(new = True)
            sage: DC = DocTestController(DD, [])
            sage: DC.add_files()
            Doctesting ...

        ::

            sage: DD = DocTestDefaults(sagenb = True)
            sage: DC = DocTestController(DD, [])
            sage: DC.add_files() # py2 # optional - sagenb
            Doctesting the Sage notebook.
            sage: DC.files[0][-6:] # py2 # optional - sagenb
            'sagenb'
        """
        opj = os.path.join
        from sage.env import SAGE_SRC, SAGE_DOC_SRC, SAGE_ROOT, SAGE_ROOT_GIT
        # SAGE_ROOT_GIT can be None on distributions which typically
        # only have the SAGE_LOCAL install tree but not SAGE_ROOT
        if SAGE_ROOT_GIT is not None:
            have_git = os.path.isdir(SAGE_ROOT_GIT)
        else:
            have_git = False

        def all_files():
            # Add the whole library, the build helpers (when in a git
            # checkout) and the documentation sources; also schedule
            # the notebook for testing below.
            self.files.append(opj(SAGE_SRC, 'sage'))
            # Don't run these tests when not in the git repository; they are
            # of interest for building sage, but not for runtime behavior and
            # don't make sense to run outside a build environment
            if have_git:
                self.files.append(opj(SAGE_SRC, 'sage_setup'))
            self.files.append(SAGE_DOC_SRC)
            self.options.sagenb = True

        if self.options.all or (self.options.new and not have_git):
            self.log("Doctesting entire Sage library.")
            all_files()
        elif self.options.new and have_git:
            # Get all files changed in the working repo.
            self.log("Doctesting files changed since last git commit")
            import subprocess
            change = subprocess.check_output(["git",
                                              "--git-dir=" + SAGE_ROOT_GIT,
                                              "--work-tree=" + SAGE_ROOT,
                                              "status",
                                              "--porcelain"])
            change = change.decode('utf-8')
            for line in change.split("\n"):
                if not line:
                    continue
                # Porcelain format: a short status code, then the path
                # (the last whitespace-separated field).
                data = line.strip().split(' ')
                status, filename = data[0], data[-1]
                # Only test modified/added/renamed/copied/unmerged
                # Python, Cython and rst files under src/sage.
                if (set(status).issubset("MARCU")
                    and filename.startswith("src/sage")
                    and (filename.endswith(".py") or
                         filename.endswith(".pyx") or
                         filename.endswith(".rst"))):
                    self.files.append(os.path.relpath(opj(SAGE_ROOT,filename)))
        if self.options.sagenb:
            if not PythonModule('sagenb').is_present():
                # Only mention the skip when the notebook was requested
                # explicitly, not as part of --all.
                if not self.options.all:
                    self.log("Skipping doctesting of the Sage notebook: "
                             "not installed on Python 3")
                return

            if not self.options.all:
                self.log("Doctesting the Sage notebook.")
            from pkg_resources import Requirement, working_set
            sagenb_loc = working_set.find(Requirement.parse('sagenb')).location
            self.files.append(opj(sagenb_loc, 'sagenb'))
def expand_files_into_sources(self):
r"""
Expands ``self.files``, which may include directories, into a
list of :class:`sage.doctest.FileDocTestSource`
This function also handles the optional command line option.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: dirname = os.path.join(SAGE_SRC, 'sage', 'doctest')
sage: DD = DocTestDefaults(optional='all')
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: len(DC.sources)
11
sage: DC.sources[0].options.optional
True
::
sage: DD = DocTestDefaults(optional='magma,guava')
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: sorted(DC.sources[0].options.optional) # abs tol 1
['guava', 'magma', 'py2']
We check that files are skipped appropriately::
sage: dirname = tmp_dir()
sage: filename = os.path.join(dirname, 'not_tested.py')
sage: with open(filename, 'w') as f:
....: _ = f.write("#"*80 + "\n\n\n\n## nodoctest\n sage: 1+1\n 4")
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: DC.sources
[]
The directory ``sage/doctest/tests`` contains ``nodoctest.py``
but the files should still be tested when that directory is
explicitly given (as opposed to being recursed into)::
sage: DC = DocTestController(DD, [os.path.join(SAGE_SRC, 'sage', 'doctest', 'tests')])
sage: DC.expand_files_into_sources()
sage: len(DC.sources) >= 10
True
"""
def expand():
for path in self.files:
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for dir in list(dirs):
if dir[0] == "." or skipdir(os.path.join(root,dir)):
dirs.remove(dir)
for file in files:
if not skipfile(os.path.join(root,file)):
yield os.path.join(root, file)
else:
# the user input this file explicitly, so we don't skip it
yield path
self.sources = [FileDocTestSource(path, self.options) for path in expand()]
def filter_sources(self):
"""
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: dirname = os.path.join(SAGE_SRC, 'sage', 'doctest')
sage: DD = DocTestDefaults(failed=True)
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: for i, source in enumerate(DC.sources):
....: DC.stats[source.basename] = {'walltime': 0.1*(i+1)}
sage: DC.stats['sage.doctest.control'] = {'failed':True,'walltime':1.0}
sage: DC.filter_sources()
Only doctesting files that failed last test.
sage: len(DC.sources)
1
"""
# Filter the sources to only include those with failing doctests if the --failed option is passed
if self.options.failed:
self.log("Only doctesting files that failed last test.")
def is_failure(source):
basename = source.basename
return basename not in self.stats or self.stats[basename].get('failed')
self.sources = [x for x in self.sources if is_failure(x)]
def sort_sources(self):
r"""
This function sorts the sources so that slower doctests are run first.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: dirname = os.path.join(SAGE_SRC, 'sage', 'doctest')
sage: DD = DocTestDefaults(nthreads=2)
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: DC.sources.sort(key=lambda s:s.basename)
sage: for i, source in enumerate(DC.sources):
....: DC.stats[source.basename] = {'walltime': 0.1*(i+1)}
sage: DC.sort_sources()
Sorting sources by runtime so that slower doctests are run first....
sage: print("\n".join([source.basename for source in DC.sources]))
sage.doctest.util
sage.doctest.test
sage.doctest.sources
sage.doctest.reporting
sage.doctest.parsing
sage.doctest.forker
sage.doctest.fixtures
sage.doctest.external
sage.doctest.control
sage.doctest.all
sage.doctest
"""
if self.options.nthreads > 1 and len(self.sources) > self.options.nthreads:
self.log("Sorting sources by runtime so that slower doctests are run first....")
default = dict(walltime=0)
def sort_key(source):
basename = source.basename
return -self.stats.get(basename, default).get('walltime'), basename
self.sources = sorted(self.sources, key=sort_key)
    def run_doctests(self):
        """
        Actually runs the doctests.

        This function is called by :meth:`run`.

        EXAMPLES::

            sage: from sage.doctest.control import DocTestDefaults, DocTestController
            sage: from sage.env import SAGE_SRC
            sage: import os
            sage: dirname = os.path.join(SAGE_SRC, 'sage', 'rings', 'homset.py')
            sage: DD = DocTestDefaults()
            sage: DC = DocTestController(DD, [dirname])
            sage: DC.expand_files_into_sources()
            sage: DC.run_doctests()
            Doctesting 1 file.
            sage -t .../sage/rings/homset.py
            [... tests, ... s]
            ----------------------------------------------------------------------
            All tests passed!
            ----------------------------------------------------------------------
            Total time for all tests: ... seconds
            cpu time: ... seconds
            cumulative wall time: ... seconds
        """
        # Count file-based and other sources for the progress message.
        nfiles = 0
        nother = 0
        for F in self.sources:
            if isinstance(F, FileDocTestSource):
                nfiles += 1
            else:
                nother += 1
        if self.sources:
            # Build the "Doctesting N files using T threads (I iterations)."
            # banner from the pieces that apply.
            filestr = ", ".join(([count_noun(nfiles, "file")] if nfiles else []) +
                                ([count_noun(nother, "other source")] if nother else []))
            threads = " using %s threads"%(self.options.nthreads) if self.options.nthreads > 1 else ""
            iterations = []
            if self.options.global_iterations > 1:
                iterations.append("%s global iterations"%(self.options.global_iterations))
            if self.options.file_iterations > 1:
                iterations.append("%s file iterations"%(self.options.file_iterations))
            iterations = ", ".join(iterations)
            if iterations:
                iterations = " (%s)"%(iterations)
            self.log("Doctesting %s%s%s."%(filestr, threads, iterations))
            self.reporter = DocTestReporter(self)
            self.dispatcher = DocTestDispatcher(self)
            N = self.options.global_iterations
            for _ in range(N):
                try:
                    self.timer = Timer().start()
                    self.dispatcher.dispatch()
                except KeyboardInterrupt:
                    # Stop iterating but still report and clean up below.
                    break
                finally:
                    # Always finalize the reporter and save stats, even
                    # when interrupted or when dispatch raised.
                    self.timer.stop()
                    self.reporter.finalize()
                    self.cleanup(False)
        else:
            self.log("No files to doctest")
            # Provide a reporter-shaped object so callers can read
            # error_status/stats uniformly.
            self.reporter = DictAsObject(dict(error_status=0, stats={}))
def cleanup(self, final=True):
"""
Runs cleanup activities after actually running doctests.
In particular, saves the stats to disk and closes the logfile.
INPUT:
- ``final`` -- whether to close the logfile
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: dirname = os.path.join(SAGE_SRC, 'sage', 'rings', 'infinity.py')
sage: DD = DocTestDefaults()
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: DC.sources.sort(key=lambda s:s.basename)
sage: for i, source in enumerate(DC.sources):
....: DC.stats[source.basename] = {'walltime': 0.1*(i+1)}
....:
sage: DC.run()
Running doctests with ID ...
Doctesting 1 file.
sage -t .../rings/infinity.py
[... tests, ... s]
----------------------------------------------------------------------
All tests passed!
----------------------------------------------------------------------
Total time for all tests: ... seconds
cpu time: ... seconds
cumulative wall time: ... seconds
0
sage: DC.cleanup()
"""
self.stats.update(self.reporter.stats)
self.save_stats(self.options.stats_path)
# Close the logfile
if final and self.logfile is not None:
self.logfile.close()
self.logfile = None
def _optional_tags_string(self):
"""
Return a string describing the optional tags used.
OUTPUT: a string with comma-separated tags (without spaces, so
it can be used to build a command-line)
EXAMPLES::