diff --git a/geos_ats_package/geos_ats/command_line_parsers.py b/geos_ats_package/geos_ats/command_line_parsers.py
index 1e9b44c52..09ead6f99 100644
--- a/geos_ats_package/geos_ats/command_line_parsers.py
+++ b/geos_ats_package/geos_ats/command_line_parsers.py
@@ -40,7 +40,11 @@ def build_command_line_parser():
     parser.add_argument( "geos_bin_dir", type=str, help="GEOS binary directory." )
 
-    parser.add_argument( "-w", "--workingDir", type=str, help="Initial working directory" )
+    parser.add_argument( "ats_target", type=str, help="ats file" )
+
+    parser.add_argument( "-w", "--workingDir", type=str, help="Root working directory" )
+
+    parser.add_argument( "-b", "--baselineDir", type=str, help="Root baseline directory" )
 
     action_names = ','.join( action_options.keys() )
     parser.add_argument( "-a", "--action", type=str, default="run", help=f"Test actions options ({action_names})" )
@@ -89,6 +93,8 @@ def build_command_line_parser():
 
     parser.add_argument( "-l", "--logs", type=str, default=None )
 
+    parser.add_argument( "-f", "--allow-failed-tests", default=False, action='store_true' )
+
     parser.add_argument(
         "--failIfTestsFail",
         action="store_true",
@@ -98,8 +104,6 @@ def build_command_line_parser():
 
     parser.add_argument( "-n", "-N", "--numNodes", type=int, default="2" )
 
-    parser.add_argument( "ats_targets", type=str, nargs='*', help="ats files or directories." )
-
     return parser
@@ -127,6 +131,13 @@ def parse_command_line_arguments( args ):
             print( f"Selected verbose option ({verbose}) not recognized" )
             exit_flag = True
 
+    # Paths
+    if not options.workingDir:
+        options.workingDir = os.path.basename( options.ats_target )
+
+    if not options.baselineDir:
+        options.baselineDir = options.workingDir
+
     # Print detailed information
     if options.detail:
         for option_type, details in zip( [ 'action', 'check' ], [ action_options, check_options ] ):
@@ -139,22 +150,3 @@ def parse_command_line_arguments( args ):
         quit()
 
     return options
-
-
-def patch_parser( parser ):
-
-    def add_option_patch( *xargs, **kwargs ):
-        """
-        Convert type string to actual type instance
-        """
-        tmp = kwargs.get( 'type', str )
-        type_map = { 'string': str }
-        if isinstance( tmp, str ):
-            if tmp in type_map:
-                tmp = type_map[ tmp ]
-            else:
-                tmp = locate( tmp )
-        kwargs[ 'type' ] = tmp
-        parser.add_argument( *xargs, **kwargs )
-
-    parser.add_option = add_option_patch
diff --git a/geos_ats_package/geos_ats/configuration_record.py b/geos_ats_package/geos_ats/configuration_record.py
index 78f1ab886..d98cc5106 100644
--- a/geos_ats_package/geos_ats/configuration_record.py
+++ b/geos_ats_package/geos_ats/configuration_record.py
@@ -167,7 +167,7 @@ def initializeConfig( configFile, configOverride, options ):
     geos_atsdir = os.path.realpath( os.path.dirname( __file__ ) )
 
     # configfile
-    config.add( "testbaseline_dir", str, "", "Base directory that contains all the baselines" )
+    config.add( "testbaseline_directory", str, "", "Base directory that contains all the baselines" )
 
     config.add( "geos_bin_dir", str, "", "Directory that contains 'geos' and related executables." )
@@ -194,11 +194,6 @@ def initializeConfig( configFile, configOverride, options ):
     config.add( "report_doc_remake", bool, False,
                 "Remake test documentation, even if it already exists (used with html reports)" )
 
-    config.add( "report_text", bool, True, "True if you want text results to be generated with the report action" )
-    config.add( "report_text_file", str, "test_results.txt", "Location to write the text report" )
-    config.add( "report_text_echo", bool, True, "If True, echo the report to stdout" )
-    config.add( "report_wait", bool, False, "Wait until all tests are complete before reporting" )
-
     config.add( "report_ini", bool, True, "True if you want ini results to be generated with the report action" )
     config.add( "report_ini_file", str, "test_results.ini", "Location to write the ini report" )
@@ -213,10 +208,6 @@ def initializeConfig( configFile, configOverride, options ):
     config.add( "checkmessages_never_ignore_regexp", type( [] ), [ "not yet implemented" ],
                 "Regular expression to not ignore in all checkmessages steps." )
 
-    config.add( "report_timing", bool, False, "True if you want timing file to be generated with the report action" )
-    config.add( "report_timing_overwrite", bool, False,
-                "True if you want timing file to overwrite existing timing file rather than augment it" )
-
     # timing and priority
     config.add( "priority", str, "equal", "Method of prioritization of tests: [\"equal\", \"processors\",\"timing\"]" )
     config.add( "timing_file", str, "timing.txt", "Location of timing file" )
diff --git a/geos_ats_package/geos_ats/environment_setup.py b/geos_ats_package/geos_ats/environment_setup.py
index 509373f27..47ced31de 100644
--- a/geos_ats_package/geos_ats/environment_setup.py
+++ b/geos_ats_package/geos_ats/environment_setup.py
@@ -4,20 +4,24 @@
 import argparse
 
 
-def setup_ats( src_path, build_path, ats_xargs, ats_machine, ats_machine_dir ):
+def setup_ats( src_path, build_path, baseline_dir, working_dir, ats_xargs, ats_machine, ats_machine_dir ):
     bin_dir = os.path.join( build_path, "bin" )
     geos_ats_fname = os.path.join( bin_dir, "run_geos_ats" )
-    ats_dir = os.path.abspath( os.path.join( src_path, "integratedTests", "tests", "allTests" ) )
     test_path = os.path.join( build_path, "integratedTests" )
     link_path = os.path.join( test_path, "integratedTests" )
     run_script_fname = os.path.join( test_path, "geos_ats.sh" )
     log_dir = os.path.join( test_path, "TestResults" )
+    baseline_dir = os.path.abspath( baseline_dir )
+    working_dir = os.path.abspath( working_dir )
+    ats_main_file = os.path.abspath( os.path.join( src_path, 'inputFiles', 'main.ats' ) )
 
-    # Create a symbolic link to test directory
+    # Create a symbolic link to working directory
+    for d in [ baseline_dir, working_dir, test_path ]:
+        os.makedirs( d, exist_ok=True )
     if os.path.islink( link_path ):
         print( 'integratedTests symlink already exists' )
     else:
-        os.symlink( ats_dir, link_path )
+        os.symlink( working_dir, link_path )
 
     # Build extra arguments that should be passed to ATS
     joined_args = [ ' '.join( x ) for x in ats_xargs ]
@@ -30,7 +34,9 @@ def setup_ats( src_path, build_path, ats_xargs, ats_machine, ats_machine_dir ):
     # Write the bash script to run ats.
     with open( run_script_fname, "w" ) as g:
         g.write( "#!/bin/bash\n" )
-        g.write( f"{geos_ats_fname} {bin_dir} --workingDir {ats_dir} --logs {log_dir} {ats_args} \"$@\"\n" )
+        g.write(
+            f"{geos_ats_fname} {bin_dir} {ats_main_file} --workingDir {working_dir} --baselineDir {baseline_dir} --logs {log_dir} {ats_args} \"$@\"\n"
+        )
 
     # Make the script executable
     st = os.stat( run_script_fname )
@@ -46,6 +52,8 @@ def main():
     parser = argparse.ArgumentParser( description="Setup ATS script" )
     parser.add_argument( "src_path", type=str, help="GEOS src path" )
     parser.add_argument( "build_path", type=str, help="GEOS build path" )
+    parser.add_argument( "baseline_dir", type=str, help="GEOS test baseline root directory" )
+    parser.add_argument( "working_dir", type=str, help="GEOS test working root directory" )
     parser.add_argument( "--ats",
                          nargs='+',
                          default=[],
@@ -54,7 +62,8 @@ def main():
     parser.add_argument( "--machine", type=str, default='', help="ATS machine name" )
     parser.add_argument( "--machine-dir", type=str, default='', help="ATS machine directory" )
     options, unkown_args = parser.parse_known_args()
-    setup_ats( options.src_path, options.build_path, options.ats, options.machine, options.machine_dir )
+    setup_ats( options.src_path, options.build_path, options.baseline_dir, options.working_dir, options.ats,
+               options.machine, options.machine_dir )
 
 
 if __name__ == '__main__':
diff --git a/geos_ats_package/geos_ats/helpers/curve_check.py b/geos_ats_package/geos_ats/helpers/curve_check.py
index bdf2213eb..1353f62dd 100644
--- a/geos_ats_package/geos_ats/helpers/curve_check.py
+++ b/geos_ats_package/geos_ats/helpers/curve_check.py
@@ -275,20 +275,17 @@ def compare_time_history_curves( fname, baseline, curve, tolerance, output, outp
     # Generate script-based curve
     if script_instructions and ( len( data ) > 0 ):
         data[ 'script' ] = {}
-        try:
-            for script, fn, p, s in script_instructions:
-                k = location_strings[ p ]
-                data[ 'script' ][ f'{p} Time' ] = data[ 'target' ][ f'{p} Time' ]
-                key = f'{p} {k}'
-                key2 = f'{p}'
-                if s != DEFAULT_SET_NAME:
-                    key += f' {s}'
-                    key2 += f' {s}'
-                data[ 'script' ][ key ] = data[ 'target' ][ key ]
-                data[ 'script' ][ key2 ] = evaluate_external_script( script, fn, data[ 'target' ] )
-                data_sizes[ p ][ s ][ 'script' ] = list( np.shape( data[ 'script' ][ key2 ] ) )
-        except Exception as e:
-            errors.append( str( e ) )
+        for script, fn, p, s in script_instructions:
+            k = location_strings[ p ]
+            data[ 'script' ][ f'{p} Time' ] = data[ 'target' ][ f'{p} Time' ]
+            key = f'{p} {k}'
+            key2 = f'{p}'
+            if s != DEFAULT_SET_NAME:
+                key += f' {s}'
+                key2 += f' {s}'
+            data[ 'script' ][ key ] = data[ 'target' ][ key ]
+            data[ 'script' ][ key2 ] = evaluate_external_script( script, fn, data[ 'target' ] )
+            data_sizes[ p ][ s ][ 'script' ] = list( np.shape( data[ 'script' ][ key2 ] ) )
 
     # Reshape data if necessary so that they have a predictable number of dimensions
     for k in data.keys():
diff --git a/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py b/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py
index fb1e4f07c..488aba172 100644
--- a/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py
+++ b/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py
@@ -1,8 +1,8 @@
 #BATS:batchGeosatsMoab batchGeosatsMoab BatchGeosatsMoab -1
 
 from ats import machines, configuration, log, atsut, times, AtsTest  # type: ignore[import]
-import subprocess, sys, os, shlex, time, socket, re
-import utils, batchTemplate  # type: ignore[import]
+import subprocess, sys, os, time, socket, re
+import utils  # type: ignore[import]
 from batch import BatchMachine  # type: ignore[import]
 import logging
diff --git a/geos_ats_package/geos_ats/main.py b/geos_ats_package/geos_ats/main.py
index f03b63ff0..42e43c1a4 100644
--- a/geos_ats_package/geos_ats/main.py
+++ b/geos_ats_package/geos_ats/main.py
@@ -1,6 +1,5 @@
 import sys
 import os
-import glob
 import shutil
 import signal
 import subprocess
@@ -21,51 +20,7 @@
 geos_atsStartTime = 0
 
 
-def check_ats_targets( options, testcases, configOverride, args ):
-    """
-    Determine which files, directories, or tests to run.
-    Handle command line config options.
-    """
-    configOverride[ "executable_path" ] = options.geos_bin_dir
-
-    ats_files = []
-    for a in options.ats_targets:
-        if "=" in a:
-            key, value = a.split( "=" )
-            configOverride[ key ] = value
-            args.remove( a )
-
-        elif not options.info:
-            if os.path.exists( a ):
-                args.remove( a )
-                if os.path.isdir( a ):
-                    newfiles = glob.glob( os.path.join( a, "*.ats" ) )
-                    ats_files.extend( newfiles )
-                else:
-                    ats_files.append( a )
-            else:
-                testcases.append( a )
-        else:
-            if options.action in test_actions:
-                logger.error( f"The command line arg '{a}' is not recognized."
-                              " An ats file or a directory name is expected." )
-                sys.exit( 1 )
-
-    # If no files were specified, look in the target directories
-    for d in [ '.', 'integratedTests' ]:
-        if len( ats_files ) == 0:
-            if os.path.isdir( d ):
-                ats_files.extend( glob.glob( os.path.join( d, "*.ats" ) ) )
-
-    # prune out ats continue files.
-    for a in ats_files[ : ]:
-        if a.endswith( "continue.ats" ):
-            ats_files.remove( a )
-
-    return ats_files
-
-
-def build_ats_arguments( options, ats_files, originalargv, config ):
+def build_ats_arguments( options, originalargv, config ):
     # construct the argv to pass to the ATS:
     atsargv = []
     atsargv.append( originalargv[ 0 ] )
@@ -85,7 +40,7 @@ def build_ats_arguments( options, ats_files, originalargv, config ):
     for f in os.environ.get( 'ATS_FILTER', '' ).split( ',' ):
         atsargv.extend( [ '-f', f ] )
 
-    atsargv.extend( ats_files )
+    atsargv.append( options.ats_target )
     sys.argv = atsargv
@@ -218,28 +173,6 @@ def check_timing_file( options, config ):
                 configuration_record.globalTestTimings[ tokens[ 0 ] ] = int( tokens[ 1 ] )
 
 
-def append_test_end_step( machine ):
-    """
-    Add extra processing to the end of tests
-    """
-    originalNoteEnd = machine.noteEnd
-
-    def noteEndWrapper( test ):
-        test.geos_atsTestCase.status.noteEnd( test )
-        return originalNoteEnd( test )
-
-    machine.noteEnd = noteEndWrapper
-
-
-def check_working_dir( workingDir ):
-    if workingDir:
-        if os.path.isdir( workingDir ):
-            os.chdir( workingDir )
-        else:
-            logger.error( f"The requested working dir does not appear to exist: {workingDir}" )
-            quit()
-
-
 def infoOptions( title, options ):
     from geos_ats import common_utilities
     topic = common_utilities.InfoTopic( title )
@@ -251,19 +184,6 @@ def infoOptions( title, options ):
     topic.endBanner()
 
 
-def infoParagraph( title, paragraphs ):
-    from geos_ats import common_utilities
-    topic = common_utilities.InfoTopic( title )
-    topic.startBanner()
-    table = common_utilities.TextTable( 1 )
-    for p in paragraphs:
-        table.addRow( p )
-    table.rowbreak = 1
-    table.maxwidth = 75
-    table.printTable()
-    topic.endBanner()
-
-
 def info( args ):
     from geos_ats import ( common_utilities, configuration_record, test_steps, suite_settings, test_case,
                            test_modifier )
@@ -289,68 +209,29 @@ def info( args ):
 
 def report( manager ):
     """The report action"""
-    from geos_ats import ( test_case, reporting, configuration_record )
-
-    testcases = test_case.TESTS.values()
-
-    if configuration_record.config.report_wait:
-        reporter = reporting.ReportWait( testcases )
-        reporter.report( sys.stdout )
-
-    if configuration_record.config.report_text:
-        reporter = reporting.ReportText( testcases )
-        with open( configuration_record.config.report_text_file, "w" ) as filep:
-            reporter.report( filep )
-        if configuration_record.config.report_text_echo:
-            with open( configuration_record.config.report_text_file, "r" ) as filep:
-                sys.stdout.write( filep.read() )
+    from geos_ats import ( reporting, configuration_record )
 
     if configuration_record.config.report_html:
-        reporter = reporting.ReportHTML( testcases )
+        reporter = reporting.ReportHTML( manager.testlist )
         reporter.report()
 
     if configuration_record.config.report_ini:
-        reporter = reporting.ReportIni( testcases )
+        reporter = reporting.ReportIni( manager.testlist )
         with open( configuration_record.config.report_ini_file, "w" ) as filep:
             reporter.report( filep )
 
-    if configuration_record.config.report_timing:
-        reporter = reporting.ReportTiming( testcases )
-        if not configuration_record.config.report_timing_overwrite:
-            try:
-                with open( configuration_record.config.timing_file, "r" ) as filep:
-                    reporter.getOldTiming( filep )
-            except IOError as e:
-                logger.debug( e )
-        with open( configuration_record.config.timing_file, "w" ) as filep:
-            reporter.report( filep )
-
 
 def summary( manager, alog, short=False ):
     """Periodic summary and final summary"""
-    from geos_ats import ( reporting, configuration_record, test_case )
+    from geos_ats import ( reporting, configuration_record )
 
     if len( manager.testlist ) == 0:
         return
 
-    if hasattr( manager.machine, "getNumberOfProcessors" ):
-        totalNumberOfProcessors = getattr( manager.machine, "getNumberOfProcessors", None )()
-    else:
-        totalNumberOfProcessors = 1
-    reporter = reporting.ReportTextPeriodic( manager.testlist )
-    reporter.report( geos_atsStartTime, totalNumberOfProcessors )
-
     if configuration_record.config.report_html and configuration_record.config.report_html_periodic:
-        testcases = test_case.TESTS.values()
-        reporter = reporting.ReportHTML( testcases )
+        reporter = reporting.ReportHTML( manager.testlist )
         reporter.report( refresh=30 )
 
-    if configuration_record.config.report_text:
-        testcases = test_case.TESTS.values()
-        reporter = reporting.ReportText( testcases )
-        with open( configuration_record.config.report_text_file, "w" ) as filep:
-            reporter.report( filep )
-
 
 def append_geos_ats_summary( manager ):
     initial_summary = manager.summary
@@ -405,7 +286,11 @@ def main():
     configOverride = {}
     testcases = []
     configFile = ''
-    check_working_dir( options.workingDir )
+
+    # Setup paths
+    ats_root_dir = os.path.abspath( os.path.dirname( options.ats_target ) )
+    os.chdir( ats_root_dir )
+    os.makedirs( options.workingDir, exist_ok=True )
     create_log_directory( options )
 
     # Check the test configuration
@@ -423,11 +308,9 @@ def main():
     # Check the report location
     if options.logs:
         config.report_html_file = os.path.join( options.logs, 'test_results.html' )
-        config.report_text_file = os.path.join( options.logs, 'test_results.txt' )
         config.report_ini_file = os.path.join( options.logs, 'test_results.ini' )
 
-    ats_files = check_ats_targets( options, testcases, configOverride, originalargv )
-    build_ats_arguments( options, ats_files, originalargv, config )
+    build_ats_arguments( options, originalargv, config )
 
     # Additional setup tasks
     check_timing_file( options, config )
@@ -456,8 +339,12 @@ def main():
     ats.AtsTest.glue( configFile=configFile )
     ats.AtsTest.glue( configOverride=configOverride )
     ats.AtsTest.glue( testmode=False )
+    ats.AtsTest.glue( workingDir=options.workingDir )
+    ats.AtsTest.glue( baselineDir=options.baselineDir )
+    ats.AtsTest.glue( logDir=options.logs )
+    ats.AtsTest.glue( atsRootDir=ats_root_dir )
     ats.AtsTest.glue( atsFlags=options.ats )
-    ats.AtsTest.glue( atsFiles=ats_files )
+    ats.AtsTest.glue( atsFiles=options.ats_target )
    ats.AtsTest.glue( machine=options.machine )
    ats.AtsTest.glue( config=config )
    if len( testcases ):
@@ -465,11 +352,10 @@ def main():
     else:
         ats.AtsTest.glue( testcases="all" )
 
-    from geos_ats import ( common_utilities, suite_settings, test_case, test_steps, user_utilities )
+    from geos_ats import ( common_utilities, suite_settings, test_case, test_steps, test_builder )
 
     # Set ats options
     append_geos_ats_summary( ats.manager )
-    append_test_end_step( ats.manager.machine )
     ats.manager.machine.naptime = 0.2
     ats.log.echo = True
@@ -484,6 +370,11 @@ def main():
     # Run ATS
     # ---------------------------------
     result = ats.manager.core()
+    if len( test_builder.test_build_failures ):
+        tmp = ', '.join( test_builder.test_build_failures )
+        logger.error( f'The following ATS test failed to build: {tmp}' )
+        if not options.allow_failed_tests:
+            raise Exception( 'Some tests failed to build' )
 
     # Make sure all the testcases requested were found
     if testcases != "all":
@@ -499,7 +390,7 @@ def main():
     # clean
     if options.action == "veryclean":
         common_utilities.removeLogDirectories( os.getcwd() )
-        files = [ config.report_html_file, config.report_ini_file, config.report_text_file ]
+        files = [ config.report_html_file, config.report_ini_file ]
         for f in files:
             if os.path.exists( f ):
                 os.remove( f )
diff --git a/geos_ats_package/geos_ats/reporting.py b/geos_ats_package/geos_ats/reporting.py
index 1f9a3bbc1..64bf328d9 100644
--- a/geos_ats_package/geos_ats/reporting.py
+++ b/geos_ats_package/geos_ats/reporting.py
@@ -1,104 +1,125 @@
 import os
 import socket
-import subprocess
 import time
-import re
 from geos_ats.configuration_record import config
-import sys
-import ats  # type: ignore[import]
 from configparser import ConfigParser
+from tabulate import tabulate
+import glob
 import logging
+from collections.abc import Mapping
+from dataclasses import dataclass
+from ats import atsut
+from ats.times import hms
+from ats import ( PASSED, FAILED, TIMEDOUT, EXPECTED, BATCHED, FILTERED, SKIPPED, CREATED, RUNNING, HALTED, LSFERROR )
 
 # Get the active logger instance
 logger = logging.getLogger( 'geos_ats' )
 
-# The following are ALEATS test status values.
-# The order is important for the ReportGroup: lower values take precendence
-FAILRUN = 0
-FAILCHECK = 1
-FAILCHECKMINOR = 2
-TIMEOUT = 3
-INPROGRESS = 4
-NOTRUN = 5
-FILTERED = 6
-RUNNING = 7
-SKIP = 8
-BATCH = 9
-FAILRUNOPTIONAL = 10
-NOTBUILT = 11
-PASS = 12
-EXPECTEDFAIL = 13
-UNEXPECTEDPASS = 14
+# Status value in priority order
+STATUS = ( EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED, RUNNING, PASSED, TIMEDOUT, HALTED, LSFERROR, FAILED )
 
-# A tuple of test status values.
-STATUS = ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, FILTERED, RUNNING, - INPROGRESS, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT ) +COLORS: Mapping[ str, str ] = { + EXPECTED.name: "black", + CREATED.name: "black", + BATCHED.name: "black", + FILTERED.name: "black", + SKIPPED.name: "orange", + RUNNING.name: "blue", + PASSED.name: "green", + TIMEDOUT.name: "red", + HALTED.name: "brown", + LSFERROR.name: "brown", + FAILED.name: "red", +} -STATUS_NOTDONE = ( NOTRUN, RUNNING, INPROGRESS, BATCH ) - -class ReportBase( object ): - """Base class for reporting. The constructor takes in a sequence - of testcases (of type test_case), and from each testcase, a - ReportTestCase object is created.""" - - def __init__( self, testcases ): - pass +@dataclass +class TestStepRecord: + status: atsut._StatusCode + log: str + output: list + number: int + elapsed: float -class ReportTiming( ReportBase ): - """Reporting class that is used for outputting test timings""" +@dataclass +class TestCaseRecord: + steps: dict + status: atsut._StatusCode + test_number: int + elapsed: float + current_step: str + resources: int - def __init__( self, testcases ): - self.reportcases = [ ReportTestCase( t ) for t in testcases ] - self.timings = {} - def getOldTiming( self, fp ): - for line in fp: - if not line.startswith( '#' ): - tokens = line.split() - self.timings[ tokens[ 0 ] ] = int( tokens[ 1 ] ) +@dataclass +class TestGroupRecord: + tests: list + status: atsut._StatusCode - def report( self, fp ): - for testcase in self.reportcases: - if testcase.status in [ PASS, TIMEOUT ]: - self.timings[ testcase.testcase.name ] = int( testcase.testcase.status.totalTime() ) - output = "" - for key in sorted( self.timings ): - output += "%s %d\n" % ( key, self.timings[ key ] ) - fp.writelines( output ) +def max_status( sa, sb ): + Ia = STATUS.index( sa ) + Ib = STATUS.index( sb ) + return STATUS[ max( Ia, Ib ) ] -class ReportIni( ReportBase ): - """Minimal reporting class that is used for bits status emails""" - def __init__( self, testcases ): - self.reportcases = [ ReportTestCase( t ) for t in testcases ] +class ReportBase( object ): + """Base class for reporting""" + + def __init__( self, test_steps ): + self.test_results = {} + self.test_groups = {} + self.status_lists = {} + + for t in test_steps: + # Parse the test step name + step_name = t.name[ t.name.find( '(' ) + 1:t.name.rfind( '_' ) ] + test_name = step_name[ :step_name.rfind( '_' ) ] + test_id = t.group.number + group_name = test_name[ :test_name.rfind( '_' ) ] + + # Save data + if test_name not in self.test_results: + self.test_results[ test_name ] = TestCaseRecord( steps={}, + status=EXPECTED, + test_number=test_id, + elapsed=0.0, + current_step=' ', + resources=t.np ) + + # Check elapsed time + elapsed = 0.0 + if hasattr( t, 'endTime' ): + elapsed = t.endTime - t.startTime + self.test_results[ test_name ].elapsed += elapsed + + # Add the step + self.test_results[ test_name ].steps[ t.name ] = TestStepRecord( status=t.status, + log=t.outname, + output=t.step_outputs, + number=t.groupSerialNumber, + elapsed=elapsed ) + + # Check the status and the latest step + self.test_results[ test_name ].status = max_status( t.status, self.test_results[ test_name ].status ) + if t.status not in ( EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED ): + self.test_results[ test_name ].current_step = t.name + + if group_name not in self.test_groups: + self.test_groups[ group_name ] = TestGroupRecord( tests=[], status=EXPECTED ) + self.test_groups[ 
group_name ].tests.append( test_name ) + self.test_groups[ group_name ].status = max_status( t.status, self.test_groups[ group_name ].status ) + + # Collect status names + for s in STATUS: + self.status_lists[ s.name ] = [ k for k, v in self.test_results.items() if v.status == s ] - # A dictionary where the key is a status, and the value is a sequence of ReportTestCases - self.reportcaseResults = {} - for status in STATUS: - self.reportcaseResults[ status ] = [ t for t in self.reportcases if t.status == status ] + self.html_filename = config.report_html_file - self.displayName = {} - self.displayName[ FAILRUN ] = "FAILRUN" - self.displayName[ FAILRUNOPTIONAL ] = "FAILRUNOPTIONAL" - self.displayName[ FAILCHECK ] = "FAILCHECK" - self.displayName[ FAILCHECKMINOR ] = "FAILCHECKMINOR" - self.displayName[ TIMEOUT ] = "TIMEOUT" - self.displayName[ NOTRUN ] = "NOTRUN" - self.displayName[ INPROGRESS ] = "INPROGRESS" - self.displayName[ FILTERED ] = "FILTERED" - self.displayName[ RUNNING ] = "RUNNING" - self.displayName[ PASS ] = "PASSED" - self.displayName[ SKIP ] = "SKIPPED" - self.displayName[ BATCH ] = "BATCHED" - self.displayName[ NOTBUILT ] = "NOTBUILT" - self.displayName[ EXPECTEDFAIL ] = "EXPECTEDFAIL" - self.displayName[ UNEXPECTEDPASS ] = "UNEXPECTEDPASS" - def __getTestCaseName( testcase ): - return testcase.testcase.name +class ReportIni( ReportBase ): + """Minimal reporting class""" def report( self, fp ): configParser = ConfigParser() @@ -107,7 +128,8 @@ def report( self, fp ): configParser.set( "Info", "Time", time.strftime( "%a, %d %b %Y %H:%M:%S" ) ) try: platform = socket.gethostname() - except: + except Exception as e: + logger.debug( str( e ) ) logger.debug( "Could not get host name" ) platform = "unknown" configParser.set( "Info", "Platform", platform ) @@ -125,572 +147,23 @@ def report( self, fp ): configParser.set( "Info", "Extra Notations", extraNotations ) configParser.add_section( "Results" ) - configParser.add_section( "Custodians" ) - configParser.add_section( "Documentation" ) - undocumentedTests = [] - for status in STATUS: - testNames = [] - for reportcaseResult in self.reportcaseResults[ status ]: - testName = reportcaseResult.testcase.name - testNames.append( testName ) - - owner = getowner( testName, reportcaseResult.testcase ) - if owner is not None: - configParser.set( "Custodians", testName, owner ) + for k, v in self.status_lists.items(): + configParser.set( "Results", k, ";".join( sorted( v ) ) ) - if config.report_doc_link: - linkToDocumentation = os.path.join( config.report_doc_dir, testName, testName + ".html" ) - if os.path.exists( linkToDocumentation ): - configParser.set( "Documentation", testName, linkToDocumentation ) - else: - if not reportcaseResult.testcase.nodoc: - undocumentedTests.append( testName ) - linkToDocumentation = getowner( testName, reportcaseResult.testcase ) - testNames = sorted( testNames ) - configParser.set( "Results", self.displayName[ status ], ";".join( testNames ) ) - undocumentedTests = sorted( undocumentedTests ) - configParser.set( "Documentation", "undocumented", ";".join( undocumentedTests ) ) configParser.write( fp ) -class ReportText( ReportBase ): - - def __init__( self, testcases ): - - ReportBase.__init__( self, testcases ) - - self.reportcases = [ ReportTestCase( t ) for t in testcases ] - - # A dictionary where the key is a status, and the value is a sequence of ReportTestCases - self.reportcaseResults = {} - for status in STATUS: - self.reportcaseResults[ status ] = [ t for t in self.reportcases if t.status == 
status ] - - self.displayName = {} - self.displayName[ FAILRUN ] = "FAIL RUN" - self.displayName[ FAILRUNOPTIONAL ] = "FAIL RUN (OPTIONAL STEP)" - self.displayName[ FAILCHECK ] = "FAIL CHECK" - self.displayName[ FAILCHECKMINOR ] = "FAIL CHECK (MINOR)" - self.displayName[ TIMEOUT ] = "TIMEOUT" - self.displayName[ NOTRUN ] = "NOT RUN" - self.displayName[ INPROGRESS ] = "INPROGRESS" - self.displayName[ FILTERED ] = "FILTERED" - self.displayName[ RUNNING ] = "RUNNING" - self.displayName[ PASS ] = "PASSED" - self.displayName[ SKIP ] = "SKIPPED" - self.displayName[ BATCH ] = "BATCHED" - self.displayName[ NOTBUILT ] = "NOT BUILT" - self.displayName[ EXPECTEDFAIL ] = "EXPECTEDFAIL" - self.displayName[ UNEXPECTEDPASS ] = "UNEXPECTEDPASS" - - def report( self, fp ): - """Write out the text report to the give file pointer""" - self.writeSummary( fp, ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, - INPROGRESS, FILTERED, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT ) ) - self.writeLongest( fp, 5 ) - self.writeDetails( fp, - ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, FILTERED ) ) - - def writeSummary( self, fp, statuses=STATUS ): - """The summary groups each TestCase by its status.""" - fp.write( "=" * 80 ) - - from geos_ats import common_utilities - for status in statuses: - - tests = self.reportcaseResults[ status ] - num = len( tests ) - fp.write( f"\n {self.displayName[status]} : {num}" ) - if num > 0: - testlist = [] - for test in tests: - testname = test.testcase.name - retries = getattr( test.testcase.atsGroup, "retries", 0 ) - if retries > 0: - testname += '[retry:%d]' % retries - testlist.append( testname ) - fp.write( f' ( {" ".join( testlist )} ) ' ) - - def writeDetails( self, - fp, - statuses=( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, INPROGRESS ), - columns=( "Status", "TestCase", "Elapsed", "Resources", "TestStep", "OutFile" ) ): - """This function provides more information about each of the test cases""" - - from geos_ats import common_utilities - - table = common_utilities.TextTable( len( columns ) ) - table.setHeader( *columns ) - table.rowbreakstyle = "-" - printTable = False - - for status in statuses: - tests = self.reportcaseResults[ status ] - - if len( tests ) == 0: - continue - - printTable = True - for test in tests: - testcase = test.testcase - label = "" - pathstr = "" - if test.laststep: - paths = testcase.resultPaths( test.laststep ) - label = test.laststep.label() - pathstr = " ".join( [ os.path.relpath( x ) for x in paths ] ) - - row = [] - for col in columns: - if col == "Status": - statusDisplay = self.displayName[ test.status ] - retries = getattr( testcase.atsGroup, "retries", 0 ) - if retries > 0: - statusDisplay += "/retry:%d" % retries - row.append( statusDisplay ) - elif col == "Directory": - row.append( os.path.relpath( testcase.path ) ) - elif col == "TestCase": - row.append( testcase.name ) - elif col == "TestStep": - row.append( label ) - elif col == "OutFile": - row.append( pathstr ) - elif col == "Elapsed": - row.append( ats.times.hms( test.elapsed ) ) - elif col == "Resources": - row.append( ats.times.hms( test.resources ) ) - else: - raise RuntimeError( f"Unknown column {col}" ) - - table.addRow( *row ) - - table.addRowBreak() - - fp.write( '\n' ) - if printTable: - table.printTable( fp ) - fp.write( '\n' ) - - def writeLongest( self, fp, num=5 ): - """The longer running tests are reported""" - - timing = [] - - for test in self.reportcases: - elapsed = 
test.elapsed - if elapsed > 0: - timing.append( ( elapsed, test ) ) - - timing = sorted( timing, reverse=True ) - - if len( timing ) > 0: - fp.write( '\n' ) - fp.write( '\n LONGEST RUNNING TESTS:' ) - for elapsed, test in timing[ :num ]: - fp.write( f" {ats.times.hms(elapsed)} {test.testcase.name}" ) - - -class ReportTextPeriodic( ReportText ): - """This class is used during the periodic reports. It is - initialized with the actual ATS tests from the ATS manager object. - The report inherits from ReportText, and extend that behavior with - """ - - def __init__( self, atstests ): - - self.atstest = atstests - testcases = list( set( [ test.geos_atsTestCase for test in atstests ] ) ) - ReportText.__init__( self, testcases ) - - def report( self, startTime, totalProcessors=None ): - self.writeSummary( sys.stdout, - ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, - INPROGRESS, FILTERED, RUNNING, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT ) ) - self.writeUtilization( sys.stdout, startTime, totalProcessors ) - self.writeLongest( sys.stdout ) - self.writeDetails( sys.stdout, ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, RUNNING ), - ( "Status", "TestCase", "Directory", "Elapsed", "Resources", "TestStep" ) ) - - def writeUtilization( self, fp, startTime, totalProcessors=None ): - """Machine utilization is reported""" - totalResourcesUsed = 0.0 - totaltime = time.time() - startTime - for test in self.reportcases: - elapsed = test.elapsed - resources = test.resources - totalResourcesUsed += resources - - if totalResourcesUsed > 0: - fp.write( '\n' ) - fp.write( f"\n TOTAL TIME : {ats.times.hms( totaltime )}" ) - fp.write( f"\n TOTAL PROCESSOR-TIME : {ats.times.hms(totalResourcesUsed )}" ) - - if totalProcessors: - availableResources = totalProcessors * totaltime - utilization = totalResourcesUsed / availableResources * 100.0 - fp.write( f" AVAIL PROCESSOR-TIME : {ats.times.hms(availableResources )}" ) - fp.write( f" RESOURCE UTILIZATION : {utilization:5.3g}%" ) - - class ReportHTML( ReportBase ): """HTML Reporting""" - # only launch a web browser once. - launchedBrowser = False - - def __init__( self, testcases ): - ReportBase.__init__( self, testcases ) - - self.reportcases = [ ReportTestCase( t ) for t in testcases ] - - # A dictionary keyed by Status. 
The value is a list of ReportGroup - self.groupResults = None - - # A sorted list of all the ReportGroup - self.groups = None - - # Initialize the ReportGroups - self.initializeReportGroups() - - self.color = {} - self.color[ FAILRUN ] = "red" - self.color[ FAILRUNOPTIONAL ] = "yellow" - self.color[ FAILCHECK ] = "reddish" - self.color[ FAILCHECKMINOR ] = "reddish" - self.color[ TIMEOUT ] = "reddish" - self.color[ NOTRUN ] = "yellow" - self.color[ INPROGRESS ] = "blue" - self.color[ FILTERED ] = "blueish" - self.color[ RUNNING ] = "orange" - self.color[ PASS ] = "green" - self.color[ SKIP ] = "yellow" - self.color[ BATCH ] = "yellow" - self.color[ NOTBUILT ] = "blueish" - self.color[ EXPECTEDFAIL ] = "green" - self.color[ UNEXPECTEDPASS ] = "red" - - self.displayName = {} - self.displayName[ FAILRUN ] = "FAIL RUN" - self.displayName[ FAILRUNOPTIONAL ] = "FAIL RUN (OPTIONAL STEP)" - self.displayName[ FAILCHECK ] = "FAIL CHECK" - self.displayName[ FAILCHECKMINOR ] = "FAIL CHECK (MINOR)" - self.displayName[ TIMEOUT ] = "TIMEOUT" - self.displayName[ NOTRUN ] = "NOT RUN" - self.displayName[ INPROGRESS ] = "INPROGRESS" - self.displayName[ FILTERED ] = "FILTERED" - self.displayName[ RUNNING ] = "RUNNING" - self.displayName[ PASS ] = "PASSED" - self.displayName[ SKIP ] = "SKIPPED" - self.displayName[ BATCH ] = "BATCHED" - self.displayName[ NOTBUILT ] = "NOTBUILT" - self.displayName[ EXPECTEDFAIL ] = "EXPECTEDFAIL" - self.displayName[ UNEXPECTEDPASS ] = "UNEXPECTEDPASS" - - self.html_filename = config.report_html_file - - def initializeReportGroups( self ): - testdir = {} - - # place testcases into groups - for reportcase in self.reportcases: - dirname = reportcase.testcase.dirname - if dirname not in testdir: - testdir[ dirname ] = [] - testdir[ dirname ].append( reportcase ) - - self.groups = [ ReportGroup( key, value ) for key, value in testdir.items() ] - - # place groups into a dictionary keyed on the group status - self.groupResults = {} - for status in STATUS: - self.groupResults[ status ] = [ g for g in self.groups if g.status == status ] - def report( self, refresh=0 ): - # potentially regenerate the html documentation for the test suite. - # # This doesn't seem to work: - # self.generateDocumentation() - sp = open( self.html_filename, 'w' ) - - if refresh: - if not any( g.status in ( RUNNING, NOTRUN, INPROGRESS ) for g in self.groups ): - refresh = 0 - self.writeHeader( sp, refresh ) self.writeSummary( sp ) - if config.report_doc_link: - self.writeDoclink( sp ) - - # Set the columns to display - if config.report_doc_link: - groupColumns = ( "Name", "Custodian", "Status" ) - else: - groupColumns = ( "Name", "Status" ) - - testcaseColumns = ( "Status", "Name", "TestStep", "Age", "Elapsed", "Resources", "Output" ) - - # write the details - self.writeTable( sp, groupColumns, testcaseColumns ) + self.writeTable( sp ) self.writeFooter( sp ) sp.close() - # launch the browser, if requested. 
- - self.browser() - - def generateDocumentation( self ): - """Generate the HTML documentation using atddoc""" - if not config.report_doc_link: - return - - testdocfile = os.path.join( config.report_doc_dir, "testdoc.html" ) - if ( os.path.exists( testdocfile ) and not config.report_doc_remake ): - # Check for any atd files newer than the test html documentation - newest = 0 - for root, dirs, files in os.walk( config.report_doc_dir ): - for file in files: - if file.endswith( ".atd" ): - filetime = os.path.getmtime( os.path.join( root, file ) ) - if filetime > newest: - newest = filetime - if os.path.getmtime( testdocfile ) > newest: - logger.info( f"HTML documentation found in {os.path.relpath(testdocfile)}. Not regenerating." ) - return - - logger.info( "Generating HTML documentation files (running 'atddoc')..." ) - retcode = True - try: - geos_atsdir = os.path.realpath( os.path.dirname( __file__ ) ) - atddoc = os.path.join( geos_atsdir, "atddoc.py" ) - #retcode = subprocess.call( atddoc, cwd=config.report_doc_dir, stdout=subprocess.PIPE) - retcode = subprocess.call( atddoc, cwd=config.report_doc_dir ) - except OSError as e: - logger.debug( e ) - if retcode: - logger.info( f" Failed to create HTML documentation in {config.report_doc_dir}" ) - else: - logger.info( f" HTML documentation created in {config.report_doc_dir}" ) - - def writeRowHeader( self, sp, groupColumns, testcaseColumns ): - header = f""" - - - TEST GROUP - TEST CASE - - - """ - - for col in groupColumns: - if col == "Name": - header += '\n NAME ' - elif col == "Custodian": - header += '\n CUSTODIAN' - elif col == "Status": - header += '\n STATUS ' - else: - raise RuntimeError( f"Unknown column {col}" ) - - for col in testcaseColumns: - if col == "Status": - header += '\n STATUS' - elif col == "Name": - header += '\n NAME ' - elif col == "TestStep": - header += '\n LAST
STEP' - elif col == "Age": - header += '\n AGE' - elif col == "Elapsed": - header += '\n ELAPSED' - elif col == "Resources": - header += '\n RESOURCES' - elif col == "Output": - header += '\n OUTPUT' - else: - raise RuntimeError( f"Unknown column {col}" ) - - header += """ - - """ - sp.write( header ) - - def writeTable( self, sp, groupColumns, testcaseColumns ): - colspan = len( groupColumns ) + len( testcaseColumns ) - header = f""" - - - - - """ - - undocumented = [] - - rowcount = 0 - testgroups = [] - for status in STATUS: - testgroups.extend( self.groupResults[ status ] ) - - for test in testgroups: - rowspan = len( test.testcases ) - if rowcount <= 0: - self.writeRowHeader( sp, groupColumns, testcaseColumns ) - rowcount += 30 - rowcount -= rowspan - - header += f""" - - - - """ - - elif col == "Custodian": - if config.report_doc_link: - owner = getowner( test.name, test.testcases[ 0 ].testcase ) - if owner is not None: - header += f'\n ' - else: - header += f'\n ' - - elif col == "Status": - header += f'' - else: - raise RuntimeError( f"Unknown column {col}" ) - - for testcase in test.testcases: - for col in testcaseColumns: - - if col == "Status": - statusDisplay = self.displayName[ testcase.status ] - retries = getattr( testcase.testcase.atsGroup, "retries", 0 ) - if retries > 0: - statusDisplay += "
retry: %d" % retries - header += f'\n' - - elif col == "Name": - # If an .html file exists for this problem, create a reference to it - testref = "" - testlinksuffix = "" - if config.report_doc_link: - docfound = False - # first check for the full problem name, with the domain extension - testhtml = os.path.join( config.report_doc_dir, test.name, - testcase.testcase.name + ".html" ) - if os.path.exists( testhtml ): - docfound = True - else: - # next check for the full problem name without the domain extension - testhtml = os.path.join( config.report_doc_dir, test.name, - testcase.testcase.name + ".html" ) - if os.path.exists( testhtml ): - docfound = True - else: - # final check for any of the input file names - for step in testcase.testcase.steps: - if getattr( step.p, "deck", None ): - [ inputname, suffix ] = getattr( step.p, "deck" ).rsplit( '.', 1 ) - testhtml = os.path.join( config.report_doc_dir, test.name, - inputname + ".html" ) - if os.path.exists( testhtml ): - # match with the first input file - docfound = True - break - - if docfound: - testref = 'href="%s"' % ( testhtml ) - else: - if not testcase.testcase.nodoc: - testlinksuffix += '
undocumented' - undocumented.append( testcase.testcase.name ) - - header += f"\n" - - elif col == "TestStep": - if testcase.laststep: - header += f"\n" - else: - header += "\n" - - elif col == "Age": - if not testcase.laststep: - header += "\n" - continue - - if testcase.diffage: - difftime = testcase.diffage - days = int( difftime ) / 86400 - if days > 0: - difftime -= days * 86400 - hours = int( difftime ) / 3600 - if days == 0: - # "New" diff file - don't color - header += f'\n' - elif days > 6: - # "Old" diff file (1+ week) - color reddish - header += f'\n' - else: - # less than a week old - but aging. Color yellowish - header += f'\n' - else: - header += "\n" - - elif col == "Elapsed": - if not testcase.elapsed: - header += "\n" - else: - header += f"\n" - - elif col == "Resources": - if not testcase.resources: - header += "\n" - else: - header += f"\n" - - elif col == "Output": - - header += "\n" - else: - raise RuntimeError( f"Unknown column {col}" ) - - header += '\n' - - header += '\n
DETAILED RESULTS
- """ - - for col in groupColumns: - if col == "Name": - header += f"""{test.name} -   {owner}  ' - header += '\n ?   {self.displayName[test.status]}{statusDisplay}{testcase.testcase.name}{testlinksuffix}{testcase.laststep.label()}    {hours}h{days}d{hours}h{days}d{hours}h    {ats.times.hms(testcase.elapsed)}  {ats.times.hms(testcase.resources)}" - seen = {} - for stepnum, step in enumerate( testcase.testcase.steps ): - paths = testcase.testcase.resultPaths( step ) - for p in paths: - # if p has already been accounted for, doesn't exist, or is an empty file, don't print it. - if ( ( ( p in seen ) or not os.path.exists( p ) ) or ( os.stat( p )[ 6 ] == 0 ) ): - continue - header += f"\n{os.path.basename(p)}
" - seen[ p ] = 1 - header += "\n
' - - if config.report_doc_link: - header += '\n

Undocumented test problems:

' - header += '\n\n" - - sp.write( header ) - def writeHeader( self, sp, refresh ): gentime = time.strftime( "%a, %d %b %Y %H:%M:%S" ) header = """ @@ -736,26 +209,24 @@ def writeHeader( self, sp, refresh ): th,td {{ background-color:#EEEEEE }} td.probname {{ background-color: #CCCCCC; font-size: large ; text-align: center}} - td.red {{ background-color: #E10000; color: white }} - td.reddish {{ background-color: #FF6666; }} - td.orange {{ background-color: #FF9900; }} - td.orangish{{ background-color: #FFBB44; }} - td.yellow {{ background-color: #EDED00; }} - td.yellowish {{ background-color: #FFFF99; }} - td.green {{ background-color: #00C000; }} - td.greenyellow {{background-color: #99FF00; }} - td.blue {{ background-color: #0000FF; color: white }} - td.blueish {{ background-color: #33CCFF; }} - th.red {{ background-color: #E10000; color: white }} - th.reddish {{ background-color: #FF6666; }} - th.orange {{ background-color: #FF9900; }} - th.orangish{{ background-color: #FFBB44; }} - th.yellow {{ background-color: #EDED00; }} - th.yellowish {{ background-color: #FFFF99; }} - th.green {{ background-color: #00C000; }} - th.greenyellow {{background-color: #99FF00; }} - th.blue {{ background-color: #0000FF; color: white }} - th.blueish {{ background-color: #33CCFF; }} + + table {{ + font-family: arial, sans-serif; + border-collapse: collapse; + }} + + td {{ + border: 1px solid #dddddd; + text-align: left; + padding: 8px; + }} + + th {{ + border: 1px solid #dddddd; + background-color: #8f8f8f; + text-align: left; + padding: 8px; + }} @@ -764,7 +235,8 @@ def writeHeader( self, sp, refresh ): # Notations: try: platform = socket.gethostname() - except: + except Exception as e: + logger.debug( str( e ) ) logger.debug( "Could not get host name" ) platform = "unknown" @@ -773,94 +245,66 @@ def writeHeader( self, sp, refresh ): else: username = os.getenv( "USER" ) - header += f""" -

- - - -
- Test results: {gentime}
- User: {username}
- Platform: {platform}
- """ - - for line in config.report_notations: - header += f"{line}
" - - header += """
-

- """ - + header += "

GEOS ATS Report

\n

Configuration

\n" + table = [ [ 'Test Results', gentime ], [ 'User', username ], [ 'Platform', platform ] ] + header += tabulate( table, tablefmt='html' ) + header += '\n' sp.write( header ) def writeSummary( self, sp ): - summary = """ - - - - - - - - """ - - haveRetry = False - for status in STATUS: - cases = self.groupResults[ status ] - num = len( cases ) - summary += f""" - - - - ' - - summary += '\n
SUMMARY
STATUS COUNT PROBLEM LIST
{self.displayName[status]}  {num} - """ - - if num > 0: - casestring = ' ' - for case in cases: - casename = case.name - caseref = case.name - retries = 0 - for test in case.testcases: - retries += getattr( test.testcase.atsGroup, "retries", 0 ) - if retries > 0: - haveRetry = True - casename += '*' - summary += f'\n {casename} ' - summary += '\n' - summary += casestring + link_pattern = '{}\n' + color_pattern = "

{}

" + header = [ 'Status', 'Count', 'Tests' ] + table = [] + + for k, v in self.status_lists.items(): + status_formatted = color_pattern.format( COLORS[ k ], k ) + test_links = [ link_pattern.format( t, t ) for t in v ] + table.append( [ status_formatted, len( v ), ', '.join( test_links ) ] ) + + sp.write( "\n\n

Summary

\n\n" ) + table_html = tabulate( table, headers=header, tablefmt='unsafehtml' ) + sp.write( table_html ) + + def writeTable( self, sp ): + header = ( "Status", "Name", "TestStep", "Elapsed", "Resources", "Output" ) + + table = [] + table_filt = [] + file_pattern = "{}" + color_pattern = "

{}

" + + for k, v in self.test_results.items(): + status_str = v.status.name + status_formatted = color_pattern.format( COLORS[ status_str ], k, status_str ) + step_shortname = v.current_step + elapsed_formatted = hms( v.elapsed ) + output_files = [] + for s in v.steps.values(): + if os.path.isfile( s.log ): + output_files.append( file_pattern.format( s.log, os.path.basename( s.log ) ) ) + if os.path.isfile( s.log + '.err' ): + output_files.append( file_pattern.format( s.log + '.err', os.path.basename( s.log + '.err' ) ) ) + for pattern in s.output: + for f in sorted( glob.glob( pattern ) ): + if ( ( 'restart' not in f ) or ( '.restartcheck' in f ) ) and os.path.isfile( f ): + output_files.append( file_pattern.format( f, os.path.basename( f ) ) ) + + row = [ status_formatted, k, step_shortname, elapsed_formatted, v.resources, ', '.join( output_files ) ] + if status_str == 'FILTERED': + table_filt.append( row ) else: - summary += '\n ' - - summary += '\n
' - if haveRetry: - summary += '\n* indicates that test was retried at least once.' - - sp.write( summary ) - - # Write link to documentation for html - def writeDoclink( self, sp ): - doc = """ -

- Test problem names with a hyperlink have been documented, - the HTML version of which can be viewed by clicking on the link. - """ + table.append( row ) - testdoc = os.path.join( config.report_doc_dir, 'testdoc.html' ) - testsumm = os.path.join( config.report_doc_dir, 'testdoc-summary.txt' ) - if os.path.exists( testdoc ) and os.path.exists( testsumm ): - doc += f""" -
- Or, you can click here for the - main page, or here for the - one page text summary. If the documentation appears out of - date, rerun 'atddoc' in this directory. - """ + if len( table ): + sp.write( "\n\n

Active Tests

\n\n" ) + table_html = tabulate( table, headers=header, tablefmt='unsafehtml' ) + sp.write( table_html ) - doc += '\n

' - sp.write( doc ) + if len( table_filt ): + sp.write( "\n\n

Filtered Tests

\n\n" ) + table_html = tabulate( table_filt, headers=header, tablefmt='unsafehtml' ) + sp.write( table_html ) def writeFooter( self, sp ): footer = """ @@ -868,247 +312,3 @@ def writeFooter( self, sp ): """ sp.write( footer ) - - def browser( self ): - if ReportHTML.launchedBrowser: - return - - if not config.browser: - return - - ReportHTML.launchedBrowser = True - command = config.browser_command.split() - command.append( "file:%s" % config.report_html_file ) - subprocess.Popen( command ) - - -class ReportWait( ReportBase ): - """This class is used while with the report_wait config option""" - - def __init__( self, testcases ): - ReportBase.__init__( self, testcases ) - self.testcases = testcases - - def report( self, fp ): - """Write out the text report to the give file pointer""" - import time - - start = time.time() - sleeptime = 60 # interval to check (seconds) - - while True: - notdone = [] - for t in self.testcases: - t.testReport() - report = ReportTestCase( t ) - if report.status in STATUS_NOTDONE: - notdone.append( t ) - - if notdone: - rr = ReportText( self.testcases ) - rr.writeSummary( sys.stdout, - ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, - INPROGRESS, FILTERED, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT ) ) - time.sleep( sleeptime ) - else: - break - - -class ReportTestCase( object ): - """This class represents the outcome from a TestCase. It hides - differences between off-line reports and the periodic reports - (when the actual ATS test object is known). In addition to - determining the testcase outcome, it also notes the last TestStep - that was run, age of the test, the total elapsed time and total - resources used.""" - - def __init__( self, testcase ): - - self.testcase = testcase # test_case - self.status = None # One of the STATUS values (e.g. FAILRUN, PASS, etc.) - self.laststep = None - self.diffage = None - self.elapsed = 0.0 - self.resources = 0.0 - - now = time.time() - outcome = None - teststatus = testcase.status - - # The following algorithm determines the outcome for this testcase by looking at the TestCase's status object. - if teststatus is None: - self.status = NOTRUN - return - elif teststatus in ( FILTERED, SKIP ): - self.status = teststatus - return - else: - for stepnum, step in enumerate( testcase.steps ): - - # Get the outcome and related information from the TestStep. - outcome, np, startTime, endTime = self._getStepInfo( step ) - - if outcome == "PASS": - # So far so good, move on to the next step - dt = endTime - startTime - self.elapsed += dt - self.resources += np * dt - continue - if outcome == "EXPT": - dt = endTime - startTime - self.elapsed += dt - self.resources += np * dt - outcome = "EXPECTEDFAIL" - self.status = EXPECTEDFAIL - break # don't continue past an expected failure - if outcome == "UNEX": - dt = endTime - startTime - self.elapsed += dt - self.resources += np * dt - outcome = "UNEXPECTEDPASS" - self.status = UNEXPECTEDPASS - break # don't continue past an unexpected pass - elif outcome == "SKIP": - self.status = SKIP - break - elif outcome == "EXEC": - # the step is currently running, break - self.laststep = step - self.status = RUNNING - dt = now - startTime - self.elapsed += dt - self.resources += np * dt - break - - if outcome == "INIT" or outcome == "BACH": - if stepnum == 0: - # The TestCase is scheduled to run, but has not yet started. 
- if outcome == "BACH": - self.status = BATCH - else: - self.status = NOTRUN - - break - else: - # At least one step in the TestCase has started (and passed), but nothing is running now. - self.status = INPROGRESS - self.laststep = step - if endTime: - self.diffage = now - endTime - dt = endTime - startTime - self.elapsed += dt - self.resources += np * dt - break - elif outcome == "FILT": - # The test won't run because of a filter - self.status = FILTERED - else: - # One of the failure modes. - self.laststep = step - if endTime: - self.diffage = now - endTime - dt = endTime - startTime - self.elapsed += dt - self.resources += np * dt - if outcome == "TIME": - self.status = TIMEOUT - elif self.laststep.isCheck(): - if self.laststep.isMinor(): - self.status = FAILCHECKMINOR - else: - self.status = FAILCHECK - else: - if self.laststep.isMinor(): - self.status = FAILRUNOPTIONAL - else: - self.status = FAILRUN - try: - with open( step.p.stdout, 'r' ) as fp: - for line in fp: - if re.search( config.report_notbuilt_regexp, line ): - self.status = NOTBUILT - break - except: - pass - break - - if outcome is None: - self.status = NOTRUN - - if outcome == "PASS": - # Don't set the laststep, but use it to get the endTime - self.status = PASS - laststep = step - laststatus = teststatus.findStep( laststep ) - assert ( laststatus ) - self.diffage = now - laststatus[ "endTime" ] - - assert self.status in STATUS - - def _getStepInfo( self, teststep ): - """This function hides the differences between the TestStatus - files and the information you can get from the ats test - object. It returns (status, np, startTime, endTime )""" - - atsTest = getattr( teststep, "atsTest", None ) - endTime = None - startTime = None - - if atsTest is not None: - status = str( atsTest.status ) - startTime = getattr( atsTest, "startTime", None ) - endTime = getattr( atsTest, "endTime", None ) - if status == "PASS" and atsTest.expectedResult == ats.FAILED: - status = "FAIL" - if status == "FAIL" and atsTest.expectedResult == ats.FAILED: - status = "UNEX" - else: - stepstatus = self.testcase.status.findStep( teststep ) - if stepstatus is None: - status = "INIT" - else: - status = stepstatus[ "result" ] - startTime = stepstatus[ "startTime" ] - endTime = stepstatus[ "endTime" ] - - np = getattr( teststep.p, "np", 1 ) - - if status in ( "SKIP", "FILT", "INIT", "PASS", "FAIL", "TIME", "EXEC", "BACH", "EXPT", "UNEX" ): - return ( status, np, startTime, endTime ) - else: - return ( "SKIP", np, startTime, endTime ) - - -class ReportGroup( object ): - """A class to represent a group of TestCases. 
Currently, the only - grouping done is at the directory level: every testcase in a - directory belongs to the same ReportGroup.""" - - def __init__( self, groupName, testcases ): - self.name = groupName - self.testcases = testcases - self.status = NOTRUN - if self.testcases: - self.status = min( [ case.status for case in self.testcases ] ) - assert self.status in STATUS - - def __cmp__( self, other ): - return self.name == other.name - - -def getowner( dirname, testcase=None ): - owner = "" - if not config.report_doc_link: - try: - atdfile = os.path.join( config.report_doc_dir, dirname, dirname + ".atd" ) - with open( atdfile, "r" ) as fp: - for line in fp: - match = re.search( "CUSTODIAN:: +(.*)$", line ) - if not match: - owner = match.group( 1 ) - break - except IOError as e: - logger.debug( e ) - if owner == "" and testcase and ( "owner" in testcase.dictionary ): - return testcase.dictionary[ "owner" ] - return owner diff --git a/geos_ats_package/geos_ats/rules.py b/geos_ats_package/geos_ats/rules.py deleted file mode 100644 index 741b5e2ac..000000000 --- a/geos_ats_package/geos_ats/rules.py +++ /dev/null @@ -1,225 +0,0 @@ -#!/bin/env python - -import optparse -import subprocess -import os -import sys -#import glob -import shutil -import logging - -logger = logging.getLogger( 'geos_ats' ) - - -def switch( booleans, i ): - booleans[ i ] = not booleans[ i ] - - -def DeclareCompoundRuleClass( name, RuleA, RuleB ): - """ - Declares a class of name name that is a new rule that is - the combination of 2 base rules. - """ - tmp = type( name, ( RuleA, RuleB ), {} ) - tmp.numToggles = RuleA.numToggles + RuleB.numToggles - tmp.numCombinations = RuleA.numCombinations * RuleB.numCombinations - - # Define the initializer for the new class - def newInit( self, toggles ): - RuleA.__init__( self, toggles, 0, RuleA.numToggles ) - RuleB.__init__( self, toggles, RuleA.numToggles ) - - tmp.__init__ = newInit - globals()[ name ] = tmp - return tmp - - -def GenRules( RuleType ): - """ Generator that produces a rule for each possible combination of toggles""" - - nt = RuleType.numToggles - nc = RuleType.numCombinations - """" toggles is [1,2,4,8,16,...] masked by the bitmap of the rulecount. - For example, if nt = 3 (and thus nc = 8), resulting generated toggles are: - [0,0,0] - [1,0,0] - [0,2,0] - [1,2,0] - [0,0,4] - [1,0,4] - [0,2,4] - [1,2,4] - Note that the resulting rule can be uniquely ID'd by the sum of the toggle array. 
-"""
-
-    for i in range( nc ):
-        toggles = [ i & pow( 2, x ) for x in range( nt ) ]
-        tmp = RuleType( toggles )
-        tmp.refresh()
-        yield tmp
-
-
-class Rule( object ):
-    """ Base class for the rules"""
-
-    def __init__( self, nToggles, nCombinations, toggles ):
-        self.numToggles = nToggles
-        self.numCombinations = nCombinations
-        self.toggles = toggles
-        self.repStrings = {}
-        """ Assumes toggles is set in a way consistent with what is done in GenRules"""
-        self.id = sum( self.toggles )
-        self.repStrings[ "@@POS@@" ] = str( self.id )
-
-    def GetPosition( self ):
-        return self.id * 1.0
-
-    def refresh( self ):
-        pass
-
-    def replaceString( self, string ):
-        tmp = string
-        for s in self.repStrings:
-            tmp = tmp.replace( s, self.repStrings[ s ] )
-        return tmp
-
-    def sedFile( self, fIn, fOut ):
-        inFile = open( fIn )
-        outFile = open( fOut, 'w' )
-        for line in inFile:
-            outFile.write( self.replaceString( line ) )
-        inFile.close()
-        outFile.close()
-
-    def checkTimehist( self ):
-        # timehist
-        logger.error( 'checkTimehist method not defined' )
-
-
-class SetupRules( Rule ):
-    numToggles = 2
-    numCombinations = pow( 2, numToggles )
-
-    def __init__( self, toggles, minToggle=0, maxToggle=None ):
-        self.setupMin = minToggle
-        self.setupMax = maxToggle
-        Rule.__init__( self, SetupRules.numToggles, SetupRules.numCombinations, toggles )
-
-    def refresh( self ):
-        mtoggles = self.toggles[ self.setupMin:self.setupMax ]
-
-        underscoredName = mtoggles[ 0 ]
-        self.isTenthCycle = mtoggles[ 1 ]
-
-        self.baseName = "foo%i" % self.id
-        self.baseName = "%s%s" % ( self.baseName, "_001" if underscoredName else "" )
-        self.repStrings[ "@@BASE@@" ] = self.baseName
-
-        self.inputDeck = "%s.in" % self.baseName
-        self.repStrings[ "@@DECK@@" ] = self.inputDeck
-
-        self.restartBaseName = "%s_001" % self.baseName
-        self.restartName = "%s_%s" % ( self.restartBaseName, "00010" if self.isTenthCycle else "00000" )
-        self.repStrings[ "@@RF@@" ] = self.restartName
-
-        super( SetupRules, self ).refresh()
-
-    def GetInputDeckName( self ):
-        return self.inputDeck
-
-    def GetInitialRestartName( self ):
-        return self.restartName
-
-    def GetBaseName( self ):
-        return self.baseName
-
-
-class CommandLineRules( Rule ):
-    numToggles = 2
-    numCombinations = pow( 2, numToggles )
-
-    def __init__( self, toggles, minToggle=0, maxToggle=None ):
-        self.clMin = minToggle
-        self.clMax = maxToggle
-        Rule.__init__( self, CommandLineRules.numToggles, CommandLineRules.numCombinations, toggles )
-
-    def refresh( self ):
-        mtoggles = self.toggles[ self.clMin:self.clMax ]
-        self.probDefined = mtoggles[ 0 ]  # use the -prob flag
-        self.restartDefined = mtoggles[ 1 ]  # use the -rf flag
-
-        # self.prob = "-prob %s" % "@@BASE@@" if self.probDefined else ""
-        # self.rf = "-rf %s" % "@@RF@@" if self.restartDefined else ""
-        self.prob = "@@BASE@@" if self.probDefined else ""
-        self.rf = "@@RF@@" if self.restartDefined else ""
-
-        self.repStrings[ "@@CL_PROB@@" ] = self.prob
-        self.repStrings[ "@@CL_RF@@" ] = self.rf
-
-        super( CommandLineRules, self ).refresh()
-
-
-def main():
-
-    generator = GenRules( SetupRules )
-    for rule in generator:
-        vals = ( rule.GetInputDeckName(), rule.GetInitialRestartName(), rule.GetPosition() )
-        logger.debug( rule.replaceString( "InputDeck: %s\tRestartFile: %s\tPos: %f" % vals ) )
-
-    DeclareCompoundRuleClass( "SetupCommand", SetupRules, CommandLineRules )
-    logger.debug( SetupCommand.numCombinations )
-    generator = GenRules( SetupCommand )
-    logger.debug( "compound:" )
-    for rule in generator:
-        vals = ( rule.GetInputDeckName(), rule.GetInitialRestartName(), rule.GetPosition(), rule.prob, rule.rf )
-        logger.debug( rule.replaceString( "InputDeck: %s\tRestartFile: %s\tPos: %f\t%s\t%s" % vals ) )
-
-    return
-
-    dbg = True
-    parser = optparse.OptionParser()
-
-    # argument to check results of pdldiff script
-    # parser.add_option("-p", "--pdldiff", type = "string", dest = "pdldiff" )
-    ( options, args ) = parser.parse_args()
-    # assert options.gnuplot
-
-    assert len( args ) == 4
-
-    base = args[ 0 ]
-    sourceDeck = args[ 1 ]
-    atsFile = args[ 2 ]
-    outdir = args[ 3 ]
-    assert os.path.exists( sourceDeck )
-    assert os.path.exists( atsFile )
-
-    if os.path.exists( outdir ):
-        try:
-            shutil.rmtree( outdir )
-        except:
-            logger.debug( f"Could not remove directory: {outdir}" )
-
-    # make a directory
-    try:
-        os.mkdir( outdir )
-        # copy in the input deck and other necessary files for running the problem
-        shutil.copy( sourceDeck, os.path.join( outdir, "%s.ain" % base ) )
-        shutil.copy( "leos1.05.h5", outdir )
-    except:
-        logger.debug( f"Could not create directory: {outdir}" )
-
-    # copy in the ats file template, replacing appropriate text as we go
-    outp = open( os.path.join( outdir, "%s.ats" % base ), 'w' )
-    inp = open( atsFile, 'r' )
-    for line in inp:
-        line = line.replace( "BASE", base )
-        outp.write( line )
-        # sub = subprocess.call(['sed', 's/BASE/%s/'%base,atsFile],stdout=outp)
-    inp.close()
-    outp.close()
-
-    sys.exit( 0 )
-
-
-if __name__ == "__main__":
-    main()
diff --git a/geos_ats_package/geos_ats/test_builder.py b/geos_ats_package/geos_ats/test_builder.py
index 83f9ca7ad..cb1de7112 100644
--- a/geos_ats_package/geos_ats/test_builder.py
+++ b/geos_ats_package/geos_ats/test_builder.py
@@ -7,9 +7,13 @@
 from dataclasses import dataclass, asdict
 from ats.tests import AtsTest
 from lxml import etree
+import logging
 
 from .test_steps import geos
 from .test_case import TestCase
 
+test_build_failures = []
+logger = logging.getLogger( 'geos_ats' )
+
 @dataclass( frozen=True )
 class RestartcheckParameters:
@@ -79,7 +83,7 @@ def collect_block_names( fname ):
     return results
 
 
-def generate_geos_tests( decks: Iterable[ TestDeck ] ):
+def generate_geos_tests( decks: Iterable[ TestDeck ], test_type='smoke' ):
    """
    """
    for ii, deck in enumerate( decks ):
@@ -99,7 +103,18 @@ def generate_geos_tests( decks: Iterable[ TestDeck ] ):
             testcase_name = "{}_{:02d}".format( deck.name, N )
             base_name = "0to{:d}".format( deck.check_step )
-            xml_file = "{}.xml".format( deck.name )
+
+            # Search for the target xml file
+            xml_file = ''
+            for suffix in [ '', f'_{test_type}' ]:
+                if os.path.isfile( "{}{}.xml".format( deck.name, suffix ) ):
+                    xml_file = "{}{}.xml".format( deck.name, suffix )
+
+            if not xml_file:
+                logger.error( f'Could not find a matching xml file for the test: {deck.name}' )
+                test_build_failures.append( deck.name )
+                continue
+
             xml_blocks = collect_block_names( xml_file )
 
             checks = []
diff --git a/geos_ats_package/geos_ats/test_case.py b/geos_ats_package/geos_ats/test_case.py
index e3141b0f9..fbeeb0390 100644
--- a/geos_ats_package/geos_ats/test_case.py
+++ b/geos_ats_package/geos_ats/test_case.py
@@ -1,24 +1,21 @@
 import ats  # type: ignore[import]
 import os
-import sys
 import shutil
-import errno
 import logging
 import glob
+import inspect
+from configparser import ConfigParser
+from ats import atsut
+from ats import ( PASSED, FAILED, FILTERED, SKIPPED )
+from geos_ats.common_utilities import Error, Log, removeLogDirectories
+from geos_ats.configuration_record import config, globalTestTimings
 
 test = ats.manager.test
testif = ats.manager.testif - -from geos_ats.suite_settings import testLabels, testOwners -from geos_ats.common_utilities import Error, Log, InfoTopic, TextTable, removeLogDirectories -from geos_ats.configuration_record import config, globalTestTimings -from geos_ats import reporting -from geos_ats import test_modifier - -TESTS = {} -BASELINE_PATH = "baselines" logger = logging.getLogger( 'geos_ats' ) +all_test_names = [] + class Batch( object ): """A class to represent batch options""" @@ -56,59 +53,66 @@ def __init__( self, name, desc, label=None, labels=None, steps=[], **kw ): raise Exception( e ) def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch( enabled=False ), **kw ): + # Check for duplicate tests + if name in all_test_names: + raise Exception( f'Found multiple tests with the same name ({name})' ) + all_test_names.append( name ) + # Setup the test self.name = name self.desc = desc self.batch = batch - action = ats.tests.AtsTest.getOptions().get( "action" ) + # Identify the location of the ats test file + ats_root_dir = ats.tests.AtsTest.getOptions().get( "atsRootDir" ) + self.dirname = '' + for s in inspect.stack(): + if ats_root_dir in s.filename: + self.dirname = os.path.dirname( s.filename ) + break - if kw.get( "output_directory", False ): - self.path = os.path.abspath( kw.get( "output_directory" ) ) - else: - self.path = os.path.join( os.getcwd(), self.name ) + if not self.dirname: + logger.warning( 'Could not find the proper test location... defaulting to current dir' ) + self.dirname = os.getcwd() + + # Setup paths + log_dir = ats.tests.AtsTest.getOptions().get( "logDir" ) + working_relpath = os.path.relpath( self.dirname, ats_root_dir ) + working_root = ats.tests.AtsTest.getOptions().get( "workingDir" ) + working_dir = os.path.abspath( os.path.join( working_root, working_relpath, self.name ) ) - self.dirname = os.path.basename( self.path ) + baseline_relpath = working_relpath + baseline_root = ats.tests.AtsTest.getOptions().get( "baselineDir" ) + baseline_directory = os.path.abspath( os.path.join( baseline_root, baseline_relpath, self.name ) ) + + self.path = working_relpath try: - os.makedirs( self.path ) + os.makedirs( working_dir, exist_ok=True ) except OSError as e: - if e.errno == errno.EEXIST and os.path.isdir( self.path ): - pass - else: - logger.debug( e ) - raise Exception() + logger.debug( e ) + raise Exception() - self.atsGroup = None + # Setup other parameters self.dictionary = {} self.dictionary.update( kw ) self.nodoc = self.dictionary.get( "nodoc", False ) - self.statusFile = os.path.abspath( "TestStatus_%s" % self.name ) - self.status = None - self.outname = os.path.join( self.path, "%s.data" % self.name ) - self.errname = os.path.join( self.path, "%s.err" % self.name ) + self.last_status = None self.dictionary[ "name" ] = self.name - self.dictionary[ "output_directory" ] = self.path - self.dictionary[ "baseline_dir" ] = os.path.join( os.getcwd(), BASELINE_PATH, self.dirname ) - self.dictionary[ "testcase_out" ] = self.outname - self.dictionary[ "testcase_err" ] = self.errname + self.dictionary[ "test_directory" ] = self.dirname + self.dictionary[ "output_directory" ] = working_dir + self.dictionary[ "baseline_directory" ] = baseline_directory + self.dictionary[ "log_directory" ] = log_dir self.dictionary[ "testcase_name" ] = self.name - # check for test cases, testcases can either be the string - # "all" or a list of full test names. 
- testcases = ats.tests.AtsTest.getOptions().get( "testcases" ) - if testcases == "all": - pass - elif self.name in testcases: - testcases.remove( self.name ) - pass - else: - return - - if self.name in TESTS: - Error( "Name already in use: %s" % self.name ) - - TESTS[ self.name ] = self + # Check for previous log information + log_file = os.path.join( log_dir, 'test_results.ini' ) + if os.path.isfile( log_file ): + previous_config = ConfigParser() + previous_config.read( log_file ) + for k, v in previous_config[ 'Results' ].items(): + if self.name in v.split( ';' ): + self.last_status = atsut.StatusCode( k.upper() ) # check for independent if config.override_np > 0: @@ -127,8 +131,6 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch # This check avoid testcases depending on themselves. self.depends = None - self.handleLabels( label, labels ) - # complete the steps. # 1. update the steps with data from the dictionary # 2. substeps are inserted into the list of steps (the steps are flattened) @@ -139,27 +141,11 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch for step in steps: step.insertStep( self.steps ) - # test modifier - modifier = test_modifier.Factory( config.testmodifier ) - newSteps = modifier.modifySteps( self.steps, self.dictionary ) - if newSteps: - # insert the modified steps, including any extra steps that may have - # been added by the modifier. - self.steps = [] - for step in newSteps: - step.insertStep( self.steps ) - for extraStep in step.extraSteps: - extraStep.insertStep( newSteps ) - self.steps = newSteps - else: - Log( "# SKIP test=%s : testmodifier=%s" % ( self.name, config.testmodifier ) ) - self.status = reporting.SKIP - return - # Check for explicit skip flag + action = ats.tests.AtsTest.getOptions().get( "action" ) if action in ( "run", "rerun", "continue" ): if self.dictionary.get( "skip", None ): - self.status = reporting.SKIP + self.status = SKIPPED return # Filtering tests on maxprocessors @@ -167,7 +153,7 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch if config.filter_maxprocessors != -1: if npMax > config.filter_maxprocessors: Log( "# FILTER test=%s : max processors(%d > %d)" % ( self.name, npMax, config.filter_maxprocessors ) ) - self.status = reporting.FILTERED + self.status = FILTERED return # Filtering tests on maxGPUS @@ -185,7 +171,7 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch if npMax > totalNumberOfProcessors: Log( "# SKIP test=%s : not enough processors to run (%d > %d)" % ( self.name, npMax, totalNumberOfProcessors ) ) - self.status = reporting.SKIP + self.status = SKIPPED return # If the machine doesn't specify a number of GPUs then it has none. 
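Note: the "Check for previous log information" block added above assumes a specific layout for test_results.ini: each key under the [Results] section is a status name, and its value is a ';'-separated list of test case names. A minimal, self-contained sketch of that lookup using only the standard library follows; the file path and test names are illustrative assumptions, not taken from the patch.

    from configparser import ConfigParser

    def last_recorded_status( test_name, log_file ):
        """Return the status key (e.g. 'FAILED') recorded for test_name, or None if absent."""
        results = ConfigParser()
        results.read( log_file )    # yields an empty parser if the file is missing
        if not results.has_section( 'Results' ):
            return None
        for status, names in results[ 'Results' ].items():
            if test_name in names.split( ';' ):
                return status.upper()
        return None

    # Hypothetical file contents:
    #   [Results]
    #   passed = sedov_01;sedov_02
    #   failed = wellbore_01
    # last_recorded_status( 'wellbore_01', 'TestResults/test_results.ini' ) returns 'FAILED'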
@@ -193,7 +179,7 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch if ngpuMax > totalNumberOfGPUs: Log( "# SKIP test=%s : not enough gpus to run (%d > %d)" % ( self.name, ngpuMax, totalNumberOfGPUs ) ) - self.status = reporting.SKIP + self.status = SKIPPED return # filtering test steps based on action @@ -214,23 +200,6 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch reorderedSteps.append( step ) self.steps = reorderedSteps - # filter based on previous results: - if action in ( "run", "check", "continue" ): - # read the status file - self.status = test_caseStatus( self ) - - # if previously passed then skip - if self.status.isPassed(): - Log( "# SKIP test=%s (previously passed)" % ( self.name ) ) - # don't set status here, as we want the report to reflect the pass - return - - if action == "continue": - if self.status.isFailed(): - Log( "# SKIP test=%s (previously failed)" % ( self.name ) ) - # don't set status here, as we want the report to reflect the pass - return - # Perform the action: if action in ( "run", "continue" ): Log( "# run test=%s" % ( self.name ) ) @@ -247,10 +216,6 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch elif action == "commands": self.testCommands() - elif action == "reset": - if self.testReset(): - Log( "# reset test=%s" % ( self.name ) ) - elif action == "clean": Log( "# clean test=%s" % ( self.name ) ) self.testClean() @@ -268,15 +233,15 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch elif action == "list": self.testList() - elif action in ( "report" ): - self.testReport() - else: Error( "Unknown action?? %s" % action ) + def logNames( self ): + return sorted( glob.glob( os.path.join( self.dictionary[ "log_directory" ], f'*{self.name}_*' ) ) ) + def resultPaths( self, step=None ): """Return the paths to output files for the testcase. 
Used in reporting""" - paths = [ self.outname, self.errname ] + paths = [] if step: for x in step.resultPaths(): fullpath = os.path.join( self.path, x ) @@ -285,19 +250,12 @@ def resultPaths( self, step=None ): return paths - def testReset( self ): - self.status = test_caseStatus( self ) - ret = self.status.resetFailed() - self.status.writeStatusFile() - return ret + def cleanLogs( self ): + for f in self.logNames(): + os.remove( f ) def testClean( self ): - if os.path.exists( self.statusFile ): - os.remove( self.statusFile ) - if os.path.exists( self.outname ): - os.remove( self.outname ) - if os.path.exists( self.errname ): - os.remove( self.errname ) + self.cleanLogs() for step in self.steps: step.clean() @@ -322,7 +280,6 @@ def _remove( path ): # remove extra files if len( self.steps ) > 0: _remove( config.report_html_file ) - _remove( config.report_text_file ) _remove( self.path ) _remove( "*.core" ) _remove( "core" ) @@ -347,19 +304,8 @@ def findMaxNumberOfGPUs( self ): return gpuMax def testCreate( self ): - atsTest = None - keep = ats.tests.AtsTest.getOptions().get( "keep" ) - - # remove outname - if os.path.exists( self.outname ): - os.remove( self.outname ) - if os.path.exists( self.errname ): - os.remove( self.errname ) - - # create the status file - if self.status is None: - self.status = test_caseStatus( self ) - + # Remove old logs + self.cleanLogs() maxnp = 1 for stepnum, step in enumerate( self.steps ): np = getattr( step.p, "np", 1 ) @@ -372,31 +318,15 @@ def testCreate( self ): else: priority = 1 - # start a group + # Setup a new test group + atsTest = None ats.tests.AtsTest.newGroup( priority=priority ) - - # keep a reference to the ats test group - self.atsGroup = ats.tests.AtsTest.group - - # if depends - if self.depends: - priorTestCase = TESTS.get( self.depends, None ) - if priorTestCase is None: - Log( "Warning: Test %s depends on testcase %s, which is not scheduled to run" % - ( self.name, self.depends ) ) - else: - if priorTestCase.steps: - atsTest = getattr( priorTestCase.steps[ -1 ], "atsTest", None ) - for stepnum, step in enumerate( self.steps ): - np = getattr( step.p, "np", 1 ) ngpu = getattr( step.p, "ngpu", 0 ) executable = step.executable() args = step.makeArgs() - - # set the label - label = "%s/%s_%d_%s" % ( self.dirname, self.name, stepnum + 1, step.label() ) + label = "%s_%d_%s" % ( self.name, stepnum + 1, step.label() ) # call either 'test' or 'testif' if atsTest is None: @@ -404,12 +334,10 @@ def testCreate( self ): else: func = lambda *a, **k: testif( atsTest, *a, **k ) - # timelimit + # Set the time limit kw = {} - if self.batch.enabled: kw[ "timelimit" ] = self.batch.duration - if ( step.timelimit() and not config.override_timelimit ): kw[ "timelimit" ] = step.timelimit() else: @@ -424,31 +352,11 @@ def testCreate( self ): independent=self.independent, batch=self.batch.enabled, **kw ) + atsTest.step_outputs = step.resultPaths() - # ats test gets a reference to the TestStep and the TestCase - atsTest.geos_atsTestCase = self - atsTest.geos_atsTestStep = step - - # TestStep gets a reference to the atsTest - step.atsTest = atsTest - - # Add the step the test status object - self.status.addStep( atsTest ) - - # set the expected result - if step.expectedResult() == "FAIL" or step.expectedResult() is False: - atsTest.expectedResult = ats.FAILED - # The ATS does not permit tests to depend on failed tests. 
- # therefore we need to break here - self.steps = self.steps[ :stepnum + 1 ] - break - - # end the group + # End the group ats.tests.AtsTest.endGroup() - self.status.resetFailed() - self.status.writeStatusFile() - def commandLine( self, step ): args = [] executable = step.executable() @@ -501,230 +409,12 @@ def testRebaseline( self ): def testRebaselineFailed( self ): config.rebaseline_ask = False - self.status = test_caseStatus( self ) - if self.status.isFailed(): + if self.last_status == FAILED: self.testRebaseline() def testList( self ): Log( "# test=%s : labels=%s" % ( self.name.ljust( 32 ), " ".join( self.labels ) ) ) - def testReport( self ): - self.status = test_caseStatus( self ) - - def handleLabels( self, label, labels ): - """set the labels, and verify they are known to the system, the avoid typos""" - if labels is not None and label is not None: - Error( "specify only one of 'label' or 'labels'" ) - - if label is not None: - self.labels = [ label ] - elif labels is not None: - self.labels = labels - else: - self.labels = [] - - for x in self.labels: - if x not in testLabels: - Error( f"unknown label {x}. run 'geos_ats -i labels' for a list" ) - - -class test_caseStatus( object ): - - def __init__( self, testCase ): - self.testCase = testCase - self.statusFile = self.testCase.statusFile - self.readStatusFile() - - def readStatusFile( self ): - if os.path.exists( self.statusFile ): - f = open( self.statusFile, "r" ) - self.status = [ eval( x.strip() ) for x in f.readlines() ] - f.close() - else: - self.status = [] - - def writeStatusFile( self ): - assert self.status is not None - - with open( self.statusFile, "w" ) as f: - f.writelines( [ str( s ) + '\n' for s in self.status ] ) - - def testKey( self, step ): - np = getattr( step.p, "np", 1 ) - key = str( ( np, step.label(), step.executable(), step.makeArgsForStatusKey() ) ) - return key - - def testData( self, test ): - key = self.testKey( test.geos_atsTestStep ) - result = test.status - - if result == ats.PASSED and test.expectedResult == ats.FAILED: - result = ats.FAILED - endTime = getattr( test, "endTime", None ) - startTime = getattr( test, "startTime", None ) - data = {} - data[ "key" ] = key - data[ "result" ] = str( result ) - data[ "startTime" ] = startTime - data[ "endTime" ] = endTime - return key, data - - def findStep( self, step ): - key = self.testKey( step ) - for s in self.status: - if key in s[ "key" ]: - return s - - return None - - def isPassed( self ): - for step in self.testCase.steps: - status = self.findStep( step ) - if status: - if status[ "result" ] == "EXPT": - # do not continue after an expected fail - return True - elif status[ "result" ] == "PASS": - continue - else: - return False - else: - return False - return True - - def isFailed( self ): - for step in self.testCase.steps: - status = self.findStep( step ) - if status: - if status[ "result" ] == "EXPT": - # do not continue after an expected fail - return False - elif status[ "result" ] == "PASS": - continue - elif status[ "result" ] == "FAIL": - return True - else: - return False - else: - return False - return False - - def resetFailed( self ): - ret = False - for step in self.testCase.steps: - status = self.findStep( step ) - if status: - if status[ "result" ] == "EXPT": - # do not continue after an expected fail - status[ "result" ] = "INIT" - ret = True - elif status[ "result" ] == "FAIL": - status[ "result" ] = "INIT" - ret = True - else: - continue - return ret - - def totalTime( self ): - total = 0.0 - for step in self.testCase.steps: 
- status = self.findStep( step ) - if status: - steptime = status[ "endTime" ] - status[ "startTime" ] - assert steptime >= 0 - total += steptime - return total - - def addStep( self, test ): - key, data = self.testData( test ) - found = False - for s in self.status: - if key == s[ "key" ]: - found = True - break - - if not found: - self.status.append( data ) - - def noteEnd( self, test ): - """Update the TestStatus file for this test case""" - # update the status - key, data = self.testData( test ) - - self.readStatusFile() - found = False - for i, s in enumerate( self.status ): - if key in s[ "key" ]: - self.status[ i ] = data - found = True - break - - if not found: - logger.warning( f"NOT FOUND: {key} {self.statusFile}" ) - assert found - self.writeStatusFile() - - # append to stdout/stderr file - for stream in ( "outname", "errname" ): - sourceFile = getattr( test, stream ) - dataFile = getattr( self.testCase, stream ) - - if not os.path.exists( sourceFile ): - continue - - # Append to the TestCase files - f1 = open( dataFile, "a" ) - f2 = open( sourceFile, "r" ) - f1.write( ":" * 20 + "\n" ) - f1.write( self.testCase.commandLine( test.geos_atsTestStep ) + "\n" ) - f1.write( ":" * 20 + "\n" ) - f1.write( f2.read() ) - f1.close() - f2.close() - - # Copy the stdout or stderr, if requested - if stream == "outname": - destFile = test.geos_atsTestStep.saveOut() - else: - destFile = test.geos_atsTestStep.saveErr() - - if destFile: - destFile = os.path.join( self.testCase.path, destFile ) - shutil.copy( sourceFile, destFile ) - - # If this is the last step (and it passed), clean the temporary files - if config.clean_on_pass: - lastStep = ( test.geos_atsTestStep is self.testCase.steps[ -1 ] ) - if lastStep and self.isPassed(): - for step in self.testCase.steps: - step.clean() - - -def infoTestCase( *args ): - """This function is used to print documentation about the testcase""" - - topic = InfoTopic( "testcase" ) - topic.startBanner() - - logger.info( "Required parameters" ) - table = TextTable( 3 ) - table.addRow( "name", "required", "The name of the test problem" ) - table.addRow( "desc", "required", "A brief description" ) - table.addRow( "label", "required", "A string or sequence of strings to tag the TestCase. See info topic 'labels'" ) - table.addRow( "owner", "optional", - "A string or sequence of strings of test owners for this TestCase. See info topic 'owners'" ) - table.addRow( - "batch", "optional", "A Batch object. Batch(enabled=True, duration='1h', ppn=0, altname=None)." - " ppn is short for processors per node (0 means to use the global default)." - " altname will be used for the batch job's name if supplied, otherwise the full name of the test case is used." - ), - table.addRow( "depends", "optional", "The name of a testcase that this testcase depends" ) - table.addRow( "steps", "required", "A sequence of TestSteps objects. See info topic 'teststeps'" ) - - table.printTable() - - topic.endBanner() - # Make available to the tests ats.manager.define( TestCase=TestCase ) diff --git a/geos_ats_package/geos_ats/test_steps.py b/geos_ats_package/geos_ats/test_steps.py index 57d39ac9d..54f7a2f7f 100644 --- a/geos_ats_package/geos_ats/test_steps.py +++ b/geos_ats_package/geos_ats/test_steps.py @@ -111,8 +111,10 @@ class TestStepBase( object ): TestParam( "check", "True or False. 
determines whether the default checksteps will " "be automatically be added after this step.", "True" ), - "baseline_dir": - TestParam( "baseline_dir", "subdirectory of config.testbaseline_dir where the test " + "test_directory": + TestParam( "test_directory", "subdirectory holding the test definitions", "" ), + "baseline_directory": + TestParam( "baseline_directory", "subdirectory of config.testbaseline_directory where the test " "baselines are located.", "" ), "output_directory": TestParam( "output_directory", "subdirectory where the test log, params, rin, and " @@ -126,7 +128,7 @@ class TestStepBase( object ): " timehist curves.", "testmode..ul" ), "basetimehistfile": TestParam( "basetimehistfile", "location to the baseline timehistfile", - "//" ), + "//" ), "allow_rebaseline": TestParam( "allow_rebaseline", "True if the second file should be re-baselined during a rebaseline action." @@ -386,8 +388,8 @@ class geos( TestStepBase ): params = TestStepBase.defaultParams + ( TestStepBase.commonParams[ "name" ], TestStepBase.commonParams[ "deck" ], TestStepBase.commonParams[ "np" ], TestStepBase.commonParams[ "ngpu" ], TestStepBase.commonParams[ "check" ], - TestStepBase.commonParams[ "baseline_dir" ], TestStepBase.commonParams[ "output_directory" ], - TestParam( "restart_file", "The name of the restart file." ), + TestStepBase.commonParams[ "test_directory" ], TestStepBase.commonParams[ "baseline_directory" ], + TestStepBase.commonParams[ "output_directory" ], TestParam( "restart_file", "The name of the restart file." ), TestParam( "x_partitions", "The number of partitions in the x direction." ), TestParam( "y_partitions", "The number of partitions in the y direction." ), TestParam( "z_partitions", @@ -419,6 +421,9 @@ def __init__( self, restartcheck_params=None, curvecheck_params=None, **kw ): if restartcheck_params is not None: self.checksteps.append( restartcheck( restartcheck_params, **kw ) ) + if not self.checksteps: + raise Exception( f'This test does not have a restart or curve check enabled: {self.p.deck}' ) + def label( self ): return "geos" @@ -438,8 +443,9 @@ def update( self, dictionary ): self.requireParam( "deck" ) self.requireParam( "name" ) - self.requireParam( "baseline_dir" ) + self.requireParam( "baseline_directory" ) self.requireParam( "output_directory" ) + self.requireParam( "test_directory" ) self.handleCommonParams() @@ -463,10 +469,10 @@ def makeArgs( self ): args = [] if self.p.deck: - args += [ "-i", self.p.deck ] + args += [ "-i", os.path.join( self.p.test_directory, self.p.deck ) ] if self.p.restart_file: - args += [ "-r", self.p.restart_file ] + args += [ "-r", os.path.abspath( os.path.join( self.p.output_directory, '..', self.p.restart_file ) ) ] if self.p.x_partitions: args += [ "-x", self.p.x_partitions ] @@ -496,12 +502,8 @@ def makeArgs( self ): return list( map( str, args ) ) def resultPaths( self ): - paths = [] name = getGeosProblemName( self.p.deck, self.p.name ) - paths += [ os.path.join( self.p.output_directory, "%s_restart_*" ) % name ] - paths += [ os.path.join( self.p.output_directory, "silo*" ) ] - paths += [ os.path.join( self.p.output_directory, "%s_bp_*" % name ) ] - + paths = [ os.path.join( self.p.output_directory, f"{name}_restart_*" ) ] return paths def clean( self ): @@ -522,7 +524,7 @@ class restartcheck( CheckTestStepBase ): params = TestStepBase.defaultParams + CheckTestStepBase.checkParams + ( TestStepBase.commonParams[ "deck" ], TestStepBase.commonParams[ "name" ], TestStepBase.commonParams[ "np" ], - TestStepBase.commonParams[ 
"allow_rebaseline" ], TestStepBase.commonParams[ "baseline_dir" ], + TestStepBase.commonParams[ "allow_rebaseline" ], TestStepBase.commonParams[ "baseline_directory" ], TestStepBase.commonParams[ "output_directory" ], TestParam( "file_pattern", "Regex pattern to match file written out by geos." ), TestParam( "baseline_pattern", "Regex pattern to match file to compare against." ), @@ -560,7 +562,7 @@ def update( self, dictionary ): self.handleCommonParams() self.requireParam( "deck" ) - self.requireParam( "baseline_dir" ) + self.requireParam( "baseline_directory" ) self.requireParam( "output_directory" ) if self.p.file_pattern is None: @@ -569,7 +571,7 @@ def update( self, dictionary ): self.p.baseline_pattern = self.p.file_pattern self.restart_file_regex = os.path.join( self.p.output_directory, self.p.file_pattern ) - self.restart_baseline_regex = os.path.join( self.p.baseline_dir, self.p.baseline_pattern ) + self.restart_baseline_regex = os.path.join( self.p.baseline_directory, self.p.baseline_pattern ) if self.p.allow_rebaseline is None: self.p.allow_rebaseline = True @@ -613,7 +615,7 @@ def rebaseline( self ): raise IOError( "File not found matching the pattern %s in directory %s." % ( self.restart_file_regex, os.getcwd() ) ) - baseline_dir = os.path.dirname( self.restart_baseline_regex ) + baseline_directory = os.path.dirname( self.restart_baseline_regex ) root_baseline_path = findMaxMatchingFile( self.restart_baseline_regex ) if root_baseline_path is not None: @@ -623,13 +625,13 @@ def rebaseline( self ): data_dir_path = os.path.splitext( root_baseline_path )[ 0 ] shutil.rmtree( data_dir_path ) else: - os.makedirs( baseline_dir, exist_ok=True ) + os.makedirs( baseline_directory, exist_ok=True ) # Copy the root file into the baseline directory. - shutil.copy2( root_file_path, os.path.join( baseline_dir, os.path.basename( root_file_path ) ) ) + shutil.copy2( root_file_path, os.path.join( baseline_directory, os.path.basename( root_file_path ) ) ) # Copy the directory holding the data files into the baseline directory. data_dir_path = os.path.splitext( root_file_path )[ 0 ] - shutil.copytree( data_dir_path, os.path.join( baseline_dir, os.path.basename( data_dir_path ) ) ) + shutil.copytree( data_dir_path, os.path.join( baseline_directory, os.path.basename( data_dir_path ) ) ) def resultPaths( self ): return [ @@ -654,8 +656,8 @@ class curvecheck( CheckTestStepBase ): params = TestStepBase.defaultParams + CheckTestStepBase.checkParams + ( TestStepBase.commonParams[ "deck" ], TestStepBase.commonParams[ "name" ], TestStepBase.commonParams[ "np" ], - TestStepBase.commonParams[ "allow_rebaseline" ], TestStepBase.commonParams[ "baseline_dir" ], - TestStepBase.commonParams[ "output_directory" ], + TestStepBase.commonParams[ "allow_rebaseline" ], TestStepBase.commonParams[ "baseline_directory" ], + TestStepBase.commonParams[ "output_directory" ], TestStepBase.commonParams[ "test_directory" ], TestParam( "filename", "Name of the target curve file written by GEOS." ), TestParam( "curves", "A list of parameter, setname value pairs." 
), TestParam( @@ -709,10 +711,11 @@ def update( self, dictionary ): self.handleCommonParams() self.requireParam( "deck" ) - self.requireParam( "baseline_dir" ) + self.requireParam( "baseline_directory" ) self.requireParam( "output_directory" ) + self.requireParam( "test_directory" ) - self.baseline_file = os.path.join( self.p.baseline_dir, self.p.filename ) + self.baseline_file = os.path.join( self.p.baseline_directory, self.p.filename ) self.target_file = os.path.join( self.p.output_directory, self.p.filename ) self.figure_root = os.path.join( self.p.output_directory, 'curve_check' ) @@ -740,7 +743,14 @@ def makeArgs( self ): if self.p.script_instructions is not None: for c in self.p.script_instructions.split( ';' ): args += [ "-s" ] - args += c.split( ',' ) + + # Split the args and set the absolute script + tmp = c.split( ',' ) + tmp[ 0 ] = os.path.abspath( os.path.join( self.p.test_directory, tmp[ 0 ] ) ) + if not os.path.isfile( tmp[ 0 ] ): + raise FileNotFoundError( f"Could not find requested script for curve check: {tmp[0]}" ) + + args += tmp if self.p.warnings_are_errors: args += [ "-w" ] @@ -753,14 +763,12 @@ def rebaseline( self ): Log( "Rebaseline not allowed for curvecheck of %s." % self.p.name ) return - baseline_dir = os.path.split( self.baseline_file )[ 0 ] - os.makedirs( baseline_dir, exist_ok=True ) + baseline_directory = os.path.split( self.baseline_file )[ 0 ] + os.makedirs( baseline_directory, exist_ok=True ) shutil.copyfile( self.target_file, self.baseline_file ) def resultPaths( self ): - figure_pattern = os.path.join( self.figure_root, '*.png' ) - figure_list = sorted( glob.glob( figure_pattern ) ) - return [ self.target_file ] + figure_list + return [ self.target_file, os.path.join( self.figure_root, '*.png' ) ] def clean( self ): self._clean( self.resultPaths() ) diff --git a/geos_ats_package/setup.cfg b/geos_ats_package/setup.cfg index 66a890a2e..2d64f436e 100644 --- a/geos_ats_package/setup.cfg +++ b/geos_ats_package/setup.cfg @@ -16,6 +16,7 @@ install_requires = mpi4py numpy lxml + tabulate ats @ https://github.com/LLNL/ATS/archive/refs/tags/7.0.105.tar.gz python_requires = >=3.7
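Note: in the curvecheck step above, script_instructions is a ';'-separated list of instructions, each itself a ','-separated list whose first element is a script path now resolved against the test directory. A minimal sketch of that expansion is shown below; the instruction string and directory are hypothetical, and the patch additionally verifies that the resolved script exists.

    import os

    def expand_script_instructions( script_instructions, test_directory ):
        """Expand a script_instructions string into '-s' argument groups."""
        args = []
        for c in script_instructions.split( ';' ):
            args += [ "-s" ]
            tmp = c.split( ',' )
            tmp[ 0 ] = os.path.abspath( os.path.join( test_directory, tmp[ 0 ] ) )
            args += tmp
        return args

    # expand_script_instructions( 'scripts/curve_compare.py,pressure', '/src/inputFiles/wellbore' )
    # returns [ '-s', '/src/inputFiles/wellbore/scripts/curve_compare.py', 'pressure' ]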