Merge pull request idaholab#8639 from milljm/dry-run-fix-8637
Fix --dry-run
permcody committed Mar 6, 2017
2 parents 1e4a9e3 + 619d7af commit d234153
Showing 4 changed files with 107 additions and 27 deletions.
81 changes: 54 additions & 27 deletions python/TestHarness/TestHarness.py
@@ -239,7 +239,10 @@ def findAndRunTests(self, find_only=False):
# This method spawns another process and allows this loop to continue looking for tests
# RunParallel will call self.testOutputAndFinish when the test has completed running
# This method will block when the maximum allowed parallel processes are running
self.runner.run(tester, command)
if self.options.dry_run:
self.handleTestStatus(tester)
else:
self.runner.run(tester, command)
else: # This job is skipped - notify the runner
status = tester.getStatus()
if status != tester.bucket_silent: # SILENT occurs when a user is using --re options
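
The hunk above is the core of the fix: when --dry-run is passed, the tester is handed straight to the status handler instead of being executed. A minimal, self-contained sketch of that dispatch pattern (the class and names below are illustrative only, not the TestHarness API):

class MiniHarness(object):
    """Illustrative only: route a test to status reporting instead of execution."""
    def __init__(self, dry_run=False):
        self.dry_run = dry_run
        self.results = []

    def run_or_report(self, name, command):
        if self.dry_run:
            # Do not execute anything; just record what would have run.
            self.results.append((name, 'DRY RUN'))
        else:
            # Normal path: actually execute the command (stubbed here).
            self.results.append((name, 'RAN: %s' % command))

harness = MiniHarness(dry_run=True)
harness.run_or_report('test_harness.exodiff', './app-opt -i exodiff.i')
print(harness.results)  # [('test_harness.exodiff', 'DRY RUN')]
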
@@ -614,9 +617,12 @@ def handleTestStatus(self, tester, output=None):
elif self.options.pbs and self.options.processingPBS == True:
self.handleTestResult(tester, '', tester.getStatusMessage(), 0, 0, True)

# All other statuses will be testers that exited prematurely (according to the TestHarness) or were skipped
# All other statuses will be testers that exited prematurely (according to the TestHarness)
# So populate the result now based on status, and send the test to the result method to be
# printed to the screen
else:
self.handleTestResult(tester, '', '', 0, 0, True)
result = tester.getStatusMessage()
self.handleTestResult(tester, '', result, 0, 0, True)
test_completed = True

return test_completed
@@ -636,15 +642,21 @@ def handleTestResult(self, tester, output, result, start=0, end=0, add_to_table=
elif self.options.store_time:
timing = self.getSolveTime(output)

# format the SKIP messages received, otherwise leave 'result' unmolested
if status == 'SKIP':
# format the SKIP messages received
if status == tester.bucket_skip:
# Include caveats in skipped messages? Useful to know when a scaled long "RUNNING..." test completes
# but Exodiff is instructed to 'SKIP' on scaled tests.
if len(caveats):
result = '[' + ', '.join(caveats).upper() + '] skipped (' + tester.getStatusMessage() + ')'
else:
result = 'skipped (' + tester.getStatusMessage() + ')'

# result is normally populated by a tester object when a test has failed. But in this case
# checkRunnableBase determined the test to be a failure before it even ran, so we need to set
# the result here so it is printed when the extra_info argument is supplied
elif status == tester.bucket_deleted:
result = tester.getStatusMessage()

# Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
# in the 'Final Test Results' area.
if add_to_table:
@@ -698,39 +710,54 @@ def handleTestResult(self, tester, output, result, start=0, end=0, add_to_table=
def cleanup(self):
# Print the results table again if a bunch of output was spewed to the screen between
# tests as they were running
if self.options.verbose or (self.num_failed != 0 and not self.options.quiet):
if (self.options.verbose or (self.num_failed != 0 and not self.options.quiet)) and not self.options.dry_run:
print '\n\nFinal Test Results:\n' + ('-' * (TERM_COLS-1))
for (test, output, result, timing, start, end) in sorted(self.test_table, key=lambda x: x[2], reverse=True):
print printResult(test, result, timing, start, end, self.options)

time = clock() - self.start_time
print '-' * (TERM_COLS-1)
print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)

if self.num_passed:
summary = '<g>%d passed</g>'
else:
summary = '<b>%d passed</b>'
summary += ', <b>%d skipped</b>'
if self.num_pending:
summary += ', <c>%d pending</c>'
else:
summary += ', <b>%d pending</b>'
if self.num_failed:
summary += ', <r>%d FAILED</r>'
else:
summary += ', <b>%d failed</b>'
print '-' * (TERM_COLS-1)

# Mask off TestHarness error codes to report parser errors
fatal_error = ''
if self.error_code & Parser.getErrorCodeMask():
summary += ', <r>FATAL PARSER ERROR</r>'
fatal_error += ', <r>FATAL PARSER ERROR</r>'
if self.error_code & ~Parser.getErrorCodeMask():
summary += ', <r>FATAL TEST HARNESS ERROR</r>'
fatal_error += ', <r>FATAL TEST HARNESS ERROR</r>'

# Print a different footer when performing a dry run
if self.options.dry_run:
print 'Processed %d tests in %.1f seconds' % (self.num_passed+self.num_skipped, time)
summary = '<b>%d would run</b>'
summary += ', <b>%d would be skipped</b>'
summary += fatal_error
print colorText( summary % (self.num_passed, self.num_skipped), "", html = True, \
colored=self.options.colored, code=self.options.code )

else:
print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)

if self.num_passed:
summary = '<g>%d passed</g>'
else:
summary = '<b>%d passed</b>'
summary += ', <b>%d skipped</b>'
if self.num_pending:
summary += ', <c>%d pending</c>'
else:
summary += ', <b>%d pending</b>'
if self.num_failed:
summary += ', <r>%d FAILED</r>'
else:
summary += ', <b>%d failed</b>'
summary += fatal_error

print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed), "", html = True, \
colored=self.options.colored, code=self.options.code )
if self.options.pbs:
print '\nYour PBS batch file:', self.options.pbs

print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed), "", html = True, \
colored=self.options.colored, code=self.options.code )
if self.options.pbs:
print '\nYour PBS batch file:', self.options.pbs
if self.file:
self.file.close()
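
The cleanup() changes split the final footer in two: a dry run reports how many tests would run or would be skipped, while a normal run keeps the passed/skipped/pending/failed summary. A rough, self-contained sketch of that split (the function and counter names are illustrative, not TestHarness attributes):

def print_footer(num_passed, num_skipped, num_failed, elapsed, dry_run=False):
    # Illustrative only: mirror the two footer formats from the diff above.
    if dry_run:
        print('Processed %d tests in %.1f seconds' % (num_passed + num_skipped, elapsed))
        print('%d would run, %d would be skipped' % (num_passed, num_skipped))
    else:
        print('Ran %d tests in %.1f seconds' % (num_passed + num_failed, elapsed))
        print('%d passed, %d skipped, %d failed' % (num_passed, num_skipped, num_failed))

print_footer(12, 3, 0, 4.2, dry_run=True)
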

7 changes: 7 additions & 0 deletions python/TestHarness/testers/Tester.py
@@ -230,6 +230,13 @@ def processResults(self, moose_dir, retcode, options, output):
def checkRunnableBase(self, options, checks, test_list=None):
reason = ''

# If --dry-run is set, set the test status to pass and DO NOT return.
# This allows the additional checks below to run and report tests
# that would normally be skipped (and would have returned False).
if options.dry_run:
self.success_message = 'DRY RUN'
self.setStatus(self.success_message, self.bucket_success)

# Check if we only want to run failed tests
if options.failed_tests:
if self.specs['test_name'] not in test_list:
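
This check is placed before the other runnability checks on purpose: --dry-run pre-sets a passing 'DRY RUN' status but does not return, so a skip or deleted condition discovered later still overrides it, which is what the new unit tests verify. A small illustrative sketch of that precedence (simplified statuses, not the real Tester class):

def check_runnable(dry_run, skipped_reason=None, deleted_reason=None):
    # Illustrative only: later checks overwrite the optimistic dry-run status.
    status = None
    if dry_run:
        status = ('success', 'DRY RUN')
    if skipped_reason:
        status = ('skip', 'skipped (%s)' % skipped_reason)
    if deleted_reason:
        status = ('deleted', 'deleted (%s)' % deleted_reason)
    return status

print(check_runnable(dry_run=True))                                      # ('success', 'DRY RUN')
print(check_runnable(dry_run=True, skipped_reason='always skipped'))     # ('skip', ...)
print(check_runnable(dry_run=True, deleted_reason='test deleted test'))  # ('deleted', ...)
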
38 changes: 38 additions & 0 deletions python/TestHarness/unit_tests/test_HarnessTester.py
@@ -95,3 +95,41 @@ def testDuplicateOutputsOK(self):
self.assertNotRegexpMatches(output, 'heavy_out.e')
# all
self.assertNotRegexpMatches(output, 'FATAL TEST HARNESS ERROR')

def testDeleted(self):
"""
Test that a deleted test is reported as a failed deleted test when the extra info argument is supplied
"""
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.runTests('-i', 'deleted', '-e')

e = cm.exception
self.assertRegexpMatches(e.output, 'test_harness\.deleted.*?deleted \(test deleted test\)')


def testDryRun(self):
"""
Test that --dry-run returns a passing status
"""
output = self.runTests('-i', 'diffs', '--dry-run')

self.assertRegexpMatches(output, 'test_harness\.exodiff.*?DRY RUN')
self.assertRegexpMatches(output, 'test_harness\.csvdiff.*?DRY RUN')

# A test with a skipped caveat returns 'skipped' instead of 'DRY RUN'
output = self.runTests('-i', 'depend_skip_tests', '--dry-run')
self.assertIn('skipped (always skipped)', output)

# NOTE: This test normally returns (skipped dependency). However
# with dry run, the TestHarness has no idea that this is the case
# because in order to figure that out, the test has to 'run' and we are
# not running any tests (it's a dry run, after all)!
self.assertRegexpMatches(output, 'test_harness\.needs_a.*?DRY RUN')

# Deleted caveat test, which reports a failing deleted test while
# performing a dry run
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.runTests('-i', 'deleted', '-e', '--dry-run')

e = cm.exception
self.assertRegexpMatches(e.output, 'test_harness\.deleted.*?deleted \(test deleted test\)')
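
The new cases above live alongside the existing TestHarness unit tests. A minimal sketch of running them with the standard library's unittest discovery, assuming it is executed from the repository root; the exact invocation MOOSE uses may differ:

import unittest

# Discover and run the TestHarness unit tests, including the new dry-run and deleted cases.
suite = unittest.defaultTestLoader.discover('python/TestHarness/unit_tests')
unittest.TextTestRunner(verbosity=2).run(suite)
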
8 changes: 8 additions & 0 deletions test/tests/test_harness/deleted
@@ -0,0 +1,8 @@
[Tests]
[./deleted]
type = Exodiff
input = exodiff.i
exodiff = exodiff_out.e
deleted = 'test deleted test'
[../]
[]
