From 9e11751717c4dc1a2371ff583689c7200af14900 Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Mon, 29 Jan 2024 12:13:31 -0800 Subject: [PATCH 01/14] Resolving merge conflicts --- .../geos_ats/command_line_parsers.py | 105 ++- .../geos_ats/configuration_record.py | 345 +++++---- .../geos_ats/machines/batchGeosatsMoab.py | 80 +- geos_ats_package/geos_ats/main.py | 455 +++++------- geos_ats_package/geos_ats/reporting.py | 679 +++++++++-------- geos_ats_package/geos_ats/rules.py | 225 ------ geos_ats_package/geos_ats/test_case.py | 647 ++++++---------- geos_ats_package/geos_ats/test_steps.py | 699 +++++++++--------- 8 files changed, 1331 insertions(+), 1904 deletions(-) diff --git a/geos_ats_package/geos_ats/command_line_parsers.py b/geos_ats_package/geos_ats/command_line_parsers.py index 1e9b44c..1c3ffc7 100644 --- a/geos_ats_package/geos_ats/command_line_parsers.py +++ b/geos_ats_package/geos_ats/command_line_parsers.py @@ -36,58 +36,53 @@ def build_command_line_parser(): - parser = argparse.ArgumentParser( description="Runs GEOS integrated tests" ) + parser = argparse.ArgumentParser(description="Runs GEOS integrated tests") - parser.add_argument( "geos_bin_dir", type=str, help="GEOS binary directory." 
) + parser.add_argument("geos_bin_dir", type=str, help="GEOS binary directory.") - parser.add_argument( "-w", "--workingDir", type=str, help="Initial working directory" ) + parser.add_argument("ats_target", type=str, help="ats file") - action_names = ','.join( action_options.keys() ) - parser.add_argument( "-a", "--action", type=str, default="run", help=f"Test actions options ({action_names})" ) + parser.add_argument("-w", "--workingDir", type=str, help="Root working directory") - check_names = ','.join( check_options.keys() ) - parser.add_argument( "-c", "--check", type=str, default="all", help=f"Test check options ({check_names})" ) + parser.add_argument("-b", "--baselineDir", type=str, help="Root baseline directory") - verbosity_names = ','.join( verbose_options.keys() ) - parser.add_argument( "-v", - "--verbose", - type=str, - default="info", - help=f"Log verbosity options ({verbosity_names})" ) + action_names = ','.join(action_options.keys()) + parser.add_argument("-a", "--action", type=str, default="run", help=f"Test actions options ({action_names})") - parser.add_argument( "-d", - "--detail", - action="store_true", - default=False, - help="Show detailed action/check options" ) + check_names = ','.join(check_options.keys()) + parser.add_argument("-c", "--check", type=str, default="all", help=f"Test check options ({check_names})") - parser.add_argument( "-i", "--info", action="store_true", default=False, help="Info on various topics" ) + verbosity_names = ','.join(verbose_options.keys()) + parser.add_argument("-v", "--verbose", type=str, default="info", help=f"Log verbosity options ({verbosity_names})") - parser.add_argument( "-r", - "--restartCheckOverrides", - nargs='+', - action='append', - help='Restart check parameter override (name value)', - default=[] ) + parser.add_argument("-d", "--detail", action="store_true", default=False, help="Show detailed action/check options") - parser.add_argument( - "--salloc", - default=True, - help="Used by the chaosM 
machine to first allocate nodes with salloc, before running the tests" ) + parser.add_argument("-i", "--info", action="store_true", default=False, help="Info on various topics") + + parser.add_argument("-r", + "--restartCheckOverrides", + nargs='+', + action='append', + help='Restart check parameter override (name value)', + default=[]) + + parser.add_argument("--salloc", + default=True, + help="Used by the chaosM machine to first allocate nodes with salloc, before running the tests") parser.add_argument( "--sallocoptions", type=str, default="", - help="Used to override all command-line options for salloc. No other options with be used or added." ) + help="Used to override all command-line options for salloc. No other options with be used or added.") - parser.add_argument( "--ats", nargs='+', default=[], action="append", help="pass arguments to ats" ) + parser.add_argument("--ats", nargs='+', default=[], action="append", help="pass arguments to ats") - parser.add_argument( "--machine", default=None, help="name of the machine" ) + parser.add_argument("--machine", default=None, help="name of the machine") - parser.add_argument( "--machine-dir", default=None, help="Search path for machine definitions" ) + parser.add_argument("--machine-dir", default=None, help="Search path for machine definitions") - parser.add_argument( "-l", "--logs", type=str, default=None ) + parser.add_argument("-l", "--logs", type=str, default=None) parser.add_argument( "--failIfTestsFail", @@ -96,14 +91,12 @@ def build_command_line_parser(): help="geos_ats normally exits with 0. This will cause it to exit with an error code if there was a failed test." ) - parser.add_argument( "-n", "-N", "--numNodes", type=int, default="2" ) - - parser.add_argument( "ats_targets", type=str, nargs='*', help="ats files or directories." 
) + parser.add_argument("-n", "-N", "--numNodes", type=int, default="2") return parser -def parse_command_line_arguments( args ): +def parse_command_line_arguments(args): parser = build_command_line_parser() options, unkown_args = parser.parse_known_args() exit_flag = False @@ -112,7 +105,7 @@ def parse_command_line_arguments( args ): check = options.check if check not in check_options: print( - f"Selected check option ({check}) not recognized. Try running with --help/--details for more information" ) + f"Selected check option ({check}) not recognized. Try running with --help/--details for more information") exit_flag = True action = options.action @@ -124,37 +117,25 @@ def parse_command_line_arguments( args ): verbose = options.verbose if verbose not in verbose_options: - print( f"Selected verbose option ({verbose}) not recognized" ) + print(f"Selected verbose option ({verbose}) not recognized") exit_flag = True + # Paths + if not options.workingDir: + options.workingDir = os.path.basename(options.ats_target) + + if not options.baselineDir: + options.baselineDir = options.workingDir + # Print detailed information if options.detail: - for option_type, details in zip( [ 'action', 'check' ], [ action_options, check_options ] ): - print( f'\nAvailable {option_type} options:' ) + for option_type, details in zip(['action', 'check'], [action_options, check_options]): + print(f'\nAvailable {option_type} options:') for k, v in details.items(): - print( f' {k}: {v}' ) + print(f' {k}: {v}') exit_flag = True if exit_flag: quit() return options - - -def patch_parser( parser ): - - def add_option_patch( *xargs, **kwargs ): - """ - Convert type string to actual type instance - """ - tmp = kwargs.get( 'type', str ) - type_map = { 'string': str } - if isinstance( tmp, str ): - if tmp in type_map: - tmp = type_map[ tmp ] - else: - tmp = locate( tmp ) - kwargs[ 'type' ] = tmp - parser.add_argument( *xargs, **kwargs ) - - parser.add_option = add_option_patch diff --git 
a/geos_ats_package/geos_ats/configuration_record.py b/geos_ats_package/geos_ats/configuration_record.py index 78f1ab8..38b2f18 100644 --- a/geos_ats_package/geos_ats/configuration_record.py +++ b/geos_ats_package/geos_ats/configuration_record.py @@ -10,12 +10,12 @@ ################################################################################ # Get the active logger instance -logger = logging.getLogger( 'geos_ats' ) +logger = logging.getLogger('geos_ats') -class ConfigItem( object ): +class ConfigItem(object): - def __init__( self, name, type, default, doc, public ): + def __init__(self, name, type, default, doc, public): self.name = name self.type = type self.default = default @@ -24,259 +24,258 @@ def __init__( self, name, type, default, doc, public ): self.public = public -class Config( object ): +class Config(object): - def __init__( self ): - self.__dict__[ "_items" ] = {} + def __init__(self): + self.__dict__["_items"] = {} - def set( self, name, value ): + def set(self, name, value): # error checking - item = self._items[ name ] + item = self._items[name] try: if item.type == str: - value = item.type( value ) + value = item.type(value) else: - if isinstance( value, str ): - value = item.type( eval( value ) ) + if isinstance(value, str): + value = item.type(eval(value)) else: - value = item.type( value ) + value = item.type(value) except ValueError: - Error( "Attempted to set config.%s (which is %s) with %s" % ( name, str( item.type ), str( value ) ) ) + Error("Attempted to set config.%s (which is %s) with %s" % (name, str(item.type), str(value))) - item.value = item.type( value ) + item.value = item.type(value) - def copy_values( self, target ): - logger.debug( "Copying command line options to config:" ) - target_dict = vars( target ) + def copy_values(self, target): + logger.debug("Copying command line options to config:") + target_dict = vars(target) for k in self._items.keys(): if k in target_dict: - logger.debug( f" {k}: {target_dict[k]}" ) - self.set( 
k, target_dict[ k ] ) + logger.debug(f" {k}: {target_dict[k]}") + self.set(k, target_dict[k]) - def get( self, name ): + def get(self, name): # error checking - return self._items[ name ].value + return self._items[name].value - def add( self, name, type, default, doc, public=True ): - item = ConfigItem( name, type, default, doc, public ) - self._items[ item.name ] = item + def add(self, name, type, default, doc, public=True): + item = ConfigItem(name, type, default, doc, public) + self._items[item.name] = item - def checkname( self, name ): + def checkname(self, name): if name not in self.__dict__: - matches = difflib.get_close_matches( name, self._items.keys() ) - if len( matches ) == 0: - Error( "Unknown config name: %s. " - "See 'geos_ats -i config' for the complete list." % ( name ) ) + matches = difflib.get_close_matches(name, self._items.keys()) + if len(matches) == 0: + Error("Unknown config name: %s. " + "See 'geos_ats -i config' for the complete list." % (name)) else: - Error( "Unknown config name: %s. " - "Perhaps you meant '%s'. " - "See 'geos_ats -i config' for the complete list." % ( name, matches[ 0 ] ) ) + Error("Unknown config name: %s. " + "Perhaps you meant '%s'. " + "See 'geos_ats -i config' for the complete list." 
% (name, matches[0])) - def __setattr__( self, name, value ): + def __setattr__(self, name, value): if name in self._items: - self.set( name, value ) + self.set(name, value) else: - self.checkname( name ) + self.checkname(name) - def __getattr__( self, name ): + def __getattr__(self, name): if name in self._items: - return self._items[ name ].value + return self._items[name].value else: - self.checkname( name ) + self.checkname(name) # The global config object config = Config() # Global testTimings object -globalTestTimings = {} # type: ignore[var-annotated] +globalTestTimings = {} # type: ignore[var-annotated] # Depth of testconfig recursion configDepth = 0 -def infoConfigShow( public, outfile=sys.stdout ): - topic = InfoTopic( "config show", outfile ) +def infoConfigShow(public, outfile=sys.stdout): + topic = InfoTopic("config show", outfile) topic.startBanner() - import ats # type: ignore[import] + import ats # type: ignore[import] - keys = sorted( config._items.keys() ) - table = TextTable( 3 ) + keys = sorted(config._items.keys()) + table = TextTable(3) for k in keys: - item = config._items[ k ] - if ( public and item.public ) or ( not public ): + item = config._items[k] + if (public and item.public) or (not public): if item.default == item.value: diff = " " else: diff = "*" - table.addRow( item.name, diff, item.value ) + table.addRow(item.name, diff, item.value) - table.printTable( outfile ) + table.printTable(outfile) - cf = ats.tests.AtsTest.getOptions().get( "configFile" ) - outfile.write( f"\nConfig file: {cf}" ) + cf = ats.tests.AtsTest.getOptions().get("configFile") + outfile.write(f"\nConfig file: {cf}") - configOverride = ats.tests.AtsTest.getOptions().get( "configOverride", {} ) + configOverride = ats.tests.AtsTest.getOptions().get("configOverride", {}) if configOverride: - outfile.write( "\nCommand line overrides:" ) - table = TextTable( 1 ) + outfile.write("\nCommand line overrides:") + table = TextTable(1) for key, value in configOverride.items(): 
- table.addRow( key ) - table.printTable( outfile ) + table.addRow(key) + table.printTable(outfile) topic.endBanner() -def infoConfigDocumentation( public ): +def infoConfigDocumentation(public): - topic = InfoTopic( "config doc" ) + topic = InfoTopic("config doc") topic.startBanner() - keys = sorted( config._items.keys() ) - table = TextTable( 4 ) - table.addRow( "[NAME]", "[TYPE]", "[DEFAULT]", "[DOC]" ) + keys = sorted(config._items.keys()) + table = TextTable(4) + table.addRow("[NAME]", "[TYPE]", "[DEFAULT]", "[DOC]") for k in keys: - item = config._items[ k ] - if ( public and item.public ) or ( not public ): - table.addRow( item.name, item.type.__name__, item.default, item.doc ) + item = config._items[k] + if (public and item.public) or (not public): + table.addRow(item.name, item.type.__name__, item.default, item.doc) - table.colmax[ 2 ] = 20 + table.colmax[2] = 20 table.printTable() topic.endBanner() -def infoConfig( *args ): +def infoConfig(*args): - menu = InfoTopic( "config" ) - menu.addTopic( "show", "Show all the config options", lambda *x: infoConfigShow( True ) ) - menu.addTopic( "doc", "Documentation for the config options", lambda *x: infoConfigDocumentation( True ) ) - menu.addTopic( "showall", "Show all the config options (including the internal options)", - lambda: infoConfigShow( False ) ) - menu.addTopic( "docall", "Documentation for the config options (including the internal options)", - lambda: infoConfigDocumentation( False ) ) - menu.process( args ) + menu = InfoTopic("config") + menu.addTopic("show", "Show all the config options", lambda *x: infoConfigShow(True)) + menu.addTopic("doc", "Documentation for the config options", lambda *x: infoConfigDocumentation(True)) + menu.addTopic("showall", "Show all the config options (including the internal options)", + lambda: infoConfigShow(False)) + menu.addTopic("docall", "Documentation for the config options (including the internal options)", + lambda: infoConfigDocumentation(False)) + 
menu.process(args) -def initializeConfig( configFile, configOverride, options ): +def initializeConfig(configFile, configOverride, options): # determine the directory where geos_ats is located. Used to find # location of other programs. - geos_atsdir = os.path.realpath( os.path.dirname( __file__ ) ) + geos_atsdir = os.path.realpath(os.path.dirname(__file__)) # configfile - config.add( "testbaseline_dir", str, "", "Base directory that contains all the baselines" ) + config.add("testbaseline_directory", str, "", "Base directory that contains all the baselines") - config.add( "geos_bin_dir", str, "", "Directory that contains 'geos' and related executables." ) + config.add("geos_bin_dir", str, "", "Directory that contains 'geos' and related executables.") - config.add( "userscript_path", str, "", - "Directory that contains scripts for testing, searched after test directory and executable_path." ) + config.add("userscript_path", str, "", + "Directory that contains scripts for testing, searched after test directory and executable_path.") - config.add( "clean_on_pass", bool, False, "If True, then after a TestCase passes, " - "all temporary files are removed." 
) + config.add("clean_on_pass", bool, False, "If True, then after a TestCase passes, " + "all temporary files are removed.") # geos options - config.add( "geos_default_args", str, "-i", - "A string containing arguments that will always appear on the geos commandline" ) + config.add("geos_default_args", str, "-i", + "A string containing arguments that will always appear on the geos commandline") # reporting - config.add( "report_html", bool, True, "True if HTML formatted results will be generated with the report action" ) - config.add( "report_html_file", str, "test_results.html", "Location to write the html report" ) - config.add( "report_html_periodic", bool, True, "True to update the html file during the periodic reports" ) - config.add( "browser_command", str, "firefox -no-remote", - "Command to use to launch a browser to view html results" ) - config.add( "browser", bool, False, "If True, then launch the browser_command to view the report_html_file" ) - config.add( "report_doc_dir", str, os.path.normpath( os.path.join( geos_atsdir, "..", "doc" ) ), - "Location to the test doc directory (used with html reports)" ) - config.add( "report_doc_link", bool, True, "Link against docgen (used with html reports)" ) - config.add( "report_doc_remake", bool, False, - "Remake test documentation, even if it already exists (used with html reports)" ) + config.add("report_html", bool, True, "True if HTML formatted results will be generated with the report action") + config.add("report_html_file", str, "test_results.html", "Location to write the html report") + config.add("report_html_periodic", bool, True, "True to update the html file during the periodic reports") + config.add("browser_command", str, "firefox -no-remote", "Command to use to launch a browser to view html results") + config.add("browser", bool, False, "If True, then launch the browser_command to view the report_html_file") + config.add("report_doc_dir", str, os.path.normpath(os.path.join(geos_atsdir, "..", 
"doc")), + "Location to the test doc directory (used with html reports)") + config.add("report_doc_link", bool, True, "Link against docgen (used with html reports)") + config.add("report_doc_remake", bool, False, + "Remake test documentation, even if it already exists (used with html reports)") - config.add( "report_text", bool, True, "True if you want text results to be generated with the report action" ) - config.add( "report_text_file", str, "test_results.txt", "Location to write the text report" ) - config.add( "report_text_echo", bool, True, "If True, echo the report to stdout" ) - config.add( "report_wait", bool, False, "Wait until all tests are complete before reporting" ) + config.add("report_text", bool, True, "True if you want text results to be generated with the report action") + config.add("report_text_file", str, "test_results.txt", "Location to write the text report") + config.add("report_text_echo", bool, True, "If True, echo the report to stdout") + config.add("report_wait", bool, False, "Wait until all tests are complete before reporting") - config.add( "report_ini", bool, True, "True if you want ini results to be generated with the report action" ) - config.add( "report_ini_file", str, "test_results.ini", "Location to write the ini report" ) + config.add("report_ini", bool, True, "True if you want ini results to be generated with the report action") + config.add("report_ini_file", str, "test_results.ini", "Location to write the ini report") - config.add( "report_notations", type( [] ), [], "Lines of text that are inserted into the reports." ) + config.add("report_notations", type([]), [], "Lines of text that are inserted into the reports.") - config.add( "report_notbuilt_regexp", str, "(not built into this version)", - "Regular expression that must appear in output to indicate that feature is not built." 
) + config.add("report_notbuilt_regexp", str, "(not built into this version)", + "Regular expression that must appear in output to indicate that feature is not built.") - config.add( "checkmessages_always_ignore_regexp", type( [] ), [ "not available in this version" ], - "Regular expression to ignore in all checkmessages steps." ) + config.add("checkmessages_always_ignore_regexp", type([]), ["not available in this version"], + "Regular expression to ignore in all checkmessages steps.") - config.add( "checkmessages_never_ignore_regexp", type( [] ), [ "not yet implemented" ], - "Regular expression to not ignore in all checkmessages steps." ) + config.add("checkmessages_never_ignore_regexp", type([]), ["not yet implemented"], + "Regular expression to not ignore in all checkmessages steps.") - config.add( "report_timing", bool, False, "True if you want timing file to be generated with the report action" ) - config.add( "report_timing_overwrite", bool, False, - "True if you want timing file to overwrite existing timing file rather than augment it" ) + config.add("report_timing", bool, False, "True if you want timing file to be generated with the report action") + config.add("report_timing_overwrite", bool, False, + "True if you want timing file to overwrite existing timing file rather than augment it") # timing and priority - config.add( "priority", str, "equal", "Method of prioritization of tests: [\"equal\", \"processors\",\"timing\"]" ) - config.add( "timing_file", str, "timing.txt", "Location of timing file" ) + config.add("priority", str, "equal", "Method of prioritization of tests: [\"equal\", \"processors\",\"timing\"]") + config.add("timing_file", str, "timing.txt", "Location of timing file") # batch - config.add( "batch_dryrun", bool, False, - "If true, the batch jobs will not be submitted, but the batch scripts will be created" ) - config.add( "batch_interactive", bool, False, "If true, the batch jobs will be treated as interactive jobs" ) - config.add( 
"batch_bank", str, "", "The name of the bank to use" ) - config.add( "batch_ppn", int, 0, "Number of processors per node" ) - config.add( "batch_partition", str, "", "the batch partition, if not specified the default will be used." ) - config.add( "batch_queue", str, "pbatch", "the batch queue." ) - config.add( "batch_header", type( [] ), [], "Additional lines to add to the batch header" ) + config.add("batch_dryrun", bool, False, + "If true, the batch jobs will not be submitted, but the batch scripts will be created") + config.add("batch_interactive", bool, False, "If true, the batch jobs will be treated as interactive jobs") + config.add("batch_bank", str, "", "The name of the bank to use") + config.add("batch_ppn", int, 0, "Number of processors per node") + config.add("batch_partition", str, "", "the batch partition, if not specified the default will be used.") + config.add("batch_queue", str, "pbatch", "the batch queue.") + config.add("batch_header", type([]), [], "Additional lines to add to the batch header") # retry - config.add( "max_retry", int, 2, "Maximum number of times to retry failed runs." ) - config.add( "retry_err_regexp", str, - "(launch failed|Failure in initializing endpoint|channel initialization failed)", - "Regular expression that must appear in error log in order to retry." ) + config.add("max_retry", int, 2, "Maximum number of times to retry failed runs.") + config.add("retry_err_regexp", str, + "(launch failed|Failure in initializing endpoint|channel initialization failed)", + "Regular expression that must appear in error log in order to retry.") # timeout - config.add( "default_timelimit", str, "30m", - "This sets a default timelimit for all test steps which do not explicitly set a timelimit." ) - config.add( "override_timelimit", bool, False, - "If true, the value used for the default time limit will override the time limit for each test step." 
) + config.add("default_timelimit", str, "30m", + "This sets a default timelimit for all test steps which do not explicitly set a timelimit.") + config.add("override_timelimit", bool, False, + "If true, the value used for the default time limit will override the time limit for each test step.") # Decomposition Multiplication config.add( "decomp_factor", int, 1, "This sets the multiplication factor to be applied to the decomposition and number of procs of all eligible tests." ) - config.add( "override_np", int, 0, "If non-zero, maximum number of processors to use for each test step." ) + config.add("override_np", int, 0, "If non-zero, maximum number of processors to use for each test step.") # global environment variables - config.add( "environment", dict, {}, "Additional environment variables to use during testing" ) + config.add("environment", dict, {}, "Additional environment variables to use during testing") # General check config - for check in ( "restartcheck", ): + for check in ("restartcheck", ): config.add( "%s_enabled" % check, bool, True, "If True, this check has the possibility of running, " "but might not run depending on the '--check' command line option. " - "If False, this check will never be run." ) + "If False, this check will never be run.") - for check in ( "hdf5_dif.py", ): - config.add( "%s_script" % check, - str, - os.path.join( geos_atsdir, "helpers/%s.py" % check ), - "Location to the %s frontend script." % check, - public=False ) + for check in ("hdf5_dif.py", ): + config.add("%s_script" % check, + str, + os.path.join(geos_atsdir, "helpers/%s.py" % check), + "Location to the %s frontend script." 
% check, + public=False) # Checks: Restartcheck - config.add( "restart_skip_missing", bool, False, "Determines whether new/missing fields are ignored" ) - config.add( "restart_exclude_pattern", list, [], "A list of field names to ignore in restart files" ) + config.add("restart_skip_missing", bool, False, "Determines whether new/missing fields are ignored") + config.add("restart_exclude_pattern", list, [], "A list of field names to ignore in restart files") # Checks: Curvecheck - config.add( "curvecheck_enabled", bool, True, "Determines whether curvecheck steps are run." ) - config.add( "curvecheck_tapestry_mode", bool, False, - "Provide temporary backwards compatibility for nighty and weekly suites until they are using geos_ats" ) - config.add( "curvecheck_absolute", float, 1e-5, "absolute tolerance" ) - config.add( "curvecheck_relative", float, 1e-5, "relative tolerance" ) + config.add("curvecheck_enabled", bool, True, "Determines whether curvecheck steps are run.") + config.add("curvecheck_tapestry_mode", bool, False, + "Provide temporary backwards compatibility for nighty and weekly suites until they are using geos_ats") + config.add("curvecheck_absolute", float, 1e-5, "absolute tolerance") + config.add("curvecheck_relative", float, 1e-5, "relative tolerance") config.add( "curvecheck_failtype", str, "composite", "String that represents failure check. 'composite or relative' will fail curvecheck if either the composite error or relative error is too high. 'absolute and slope' will fail only if both the absolute error check and the slope error check fail. The default value is 'composite'." @@ -289,74 +288,74 @@ def initializeConfig( configFile, configOverride, options ): "curvecheck_delete_temps", bool, True, "Curvecheck generates a number of temporary data files that are used to create the images for the html file. If this parameter is true, curvecheck will delete these temporary files. By default, the parameter is true." 
) - config.add( "gnuplot_executable", str, os.path.join( "/usr", "bin", "gnuplot" ), "Location to gnuplot" ) + config.add("gnuplot_executable", str, os.path.join("/usr", "bin", "gnuplot"), "Location to gnuplot") # Rebaseline: config.add( "rebaseline_undo", bool, False, "If True, and the action is set to 'rebaseline'," - " this option will undo (revert) a previous rebaseline." ) - config.add( "rebaseline_ask", bool, True, "If True, the rebaseline will not occur until the user has anwered an" - " 'are you sure?' question" ) + " this option will undo (revert) a previous rebaseline.") + config.add("rebaseline_ask", bool, True, "If True, the rebaseline will not occur until the user has anwered an" + " 'are you sure?' question") # test modifier - config.add( "testmodifier", str, "", "Name of a test modifier to apply" ) + config.add("testmodifier", str, "", "Name of a test modifier to apply") # filters - config.add( "filter_maxprocessors", int, -1, "If not -1, Run only those tests where the number of" - " processors is less than or equal to this value" ) + config.add("filter_maxprocessors", int, -1, "If not -1, Run only those tests where the number of" + " processors is less than or equal to this value") # machines - config.add( "machine_options", list, [], "Arguments to pass to the machine module" ) + config.add("machine_options", list, [], "Arguments to pass to the machine module") - config.add( "script_launch", int, 0, "Whether to launch scripts (and other serial steps) on compute nodes" ) - config.add( "openmpi_install", str, "", "Location to the openmpi installation" ) - config.add( "openmpi_maxprocs", int, 0, "Number of maximum processors openmpi" ) - config.add( "openmpi_procspernode", int, 1, "Number of processors per node for openmpi" ) + config.add("script_launch", int, 0, "Whether to launch scripts (and other serial steps) on compute nodes") + config.add("openmpi_install", str, "", "Location to the openmpi installation") + config.add("openmpi_maxprocs", int, 0, 
"Number of maximum processors openmpi") + config.add("openmpi_procspernode", int, 1, "Number of processors per node for openmpi") config.add( "openmpi_precommand", str, "", "A string that will be" " prepended to each command. If the substring '%(np)s' is present," " it will be replaced by then number of processors required for the" " test. If the substring '%(J)s' is present, it will be replaced by" - " the unique name of the test." ) - config.add( "openmpi_args", str, "", "A string of arguments to mpirun" ) + " the unique name of the test.") + config.add("openmpi_args", str, "", "A string of arguments to mpirun") config.add( "openmpi_terminate", str, "", "A string that will be" " called upon abnormal termination. If the substring '%(J)s' is present," - " it will be replaced by the unique name of the test." ) + " it will be replaced by the unique name of the test.") - config.add( "windows_mpiexe", str, "", "Location to mpiexe" ) - config.add( "windows_nompi", bool, False, "Run executables on nompi processor" ) - config.add( "windows_oversubscribe", int, 1, - "Multiplier to number of processors to allow oversubscription of processors" ) + config.add("windows_mpiexe", str, "", "Location to mpiexe") + config.add("windows_nompi", bool, False, "Run executables on nompi processor") + config.add("windows_oversubscribe", int, 1, + "Multiplier to number of processors to allow oversubscription of processors") # populate the config with overrides from the command line for key, value in configOverride.items(): try: - setattr( config, key, value ) + setattr(config, key, value) except RuntimeError as e: # this allows for the testconfig file to define it's own # config options that can be overridden at the command line. 
- logger.debug( e ) + logger.debug(e) # Setup the config dict if configFile: - logger.warning( "Config file override currently not available" ) + logger.warning("Config file override currently not available") ## override the config file from the command line for key, value in configOverride.items(): - setattr( config, key, value ) + setattr(config, key, value) # validate prioritization scheme - if config.priority.lower().startswith( "eq" ): + if config.priority.lower().startswith("eq"): config.priority = "equal" - elif config.priority.lower().startswith( "proc" ): + elif config.priority.lower().startswith("proc"): config.priority = "processors" - elif config.priority.lower().startswith( "tim" ): + elif config.priority.lower().startswith("tim"): config.priority = "timing" else: - Error( "priority '%s' is not valid" % config.priority ) + Error("priority '%s' is not valid" % config.priority) ## environment variables for k, v in config.environment.items(): - os.environ[ k ] = v + os.environ[k] = v diff --git a/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py b/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py index fb1e4f0..bc5747d 100644 --- a/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py +++ b/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py @@ -1,36 +1,36 @@ #BATS:batchGeosatsMoab batchGeosatsMoab BatchGeosatsMoab -1 -from ats import machines, configuration, log, atsut, times, AtsTest # type: ignore[import] -import subprocess, sys, os, shlex, time, socket, re -import utils, batchTemplate # type: ignore[import] -from batch import BatchMachine # type: ignore[import] +from ats import machines, configuration, log, atsut, times, AtsTest # type: ignore[import] +import subprocess, sys, os, time, socket, re +import utils # type: ignore[import] +from batch import BatchMachine # type: ignore[import] import logging debug = configuration.debug -logger = logging.getLogger( 'geos_ats' ) +logger = logging.getLogger('geos_ats') -class BatchGeosatsMoab( 
BatchMachine ): +class BatchGeosatsMoab(BatchMachine): """The batch machine """ - def init( self ): + def init(self): - super( BatchGeosatsMoab, self ).init() + super(BatchGeosatsMoab, self).init() if "SLURM_NNODES" in os.environ.keys(): - self.ppn = int( os.getenv( "SLURM_TASKS_PER_NODE", "1" ).split( "(" )[ 0 ] ) + self.ppn = int(os.getenv("SLURM_TASKS_PER_NODE", "1").split("(")[0]) elif "SLURM_JOB_NUM_NODES" in os.environ.keys(): - self.ppn = int( os.getenv( "SLURM_JOB_CPUS_PER_NODE", "1" ).split( "(" )[ 0 ] ) + self.ppn = int(os.getenv("SLURM_JOB_CPUS_PER_NODE", "1").split("(")[0]) else: self.ppn = 0 self.numberTestsRunningMax = 2048 - def canRun( self, test ): + def canRun(self, test): return '' - def load( self, testlist ): + def load(self, testlist): """Receive a list of tests to possibly run. Submit the set of tests to batch. """ @@ -41,20 +41,20 @@ def load( self, testlist ): # for each test group make an msub file if t.groupSerialNumber == 1: - testCase = getattr( t, "geos_atsTestCase", None ) + testCase = getattr(t, "geos_atsTestCase", None) if testCase: - batchFilename = os.path.join( testCase.dirnamefull, "batch_%s.msub" % testCase.name ) - self.writeSubmitScript( batchFilename, testCase ) - self.jobid = self.submitBatchScript( testCase.name, batchFilename ) + batchFilename = os.path.join(testCase.dirnamefull, "batch_%s.msub" % testCase.name) + self.writeSubmitScript(batchFilename, testCase) + self.jobid = self.submitBatchScript(testCase.name, batchFilename) - def writeSubmitScript( self, batchFilename, testCase ): + def writeSubmitScript(self, batchFilename, testCase): - fc = open( batchFilename, "w" ) + fc = open(batchFilename, "w") batch = testCase.batch # get references to the options and configuration options = AtsTest.getOptions() - config = options.get( "config", None ) + config = options.get("config", None) # ppn # 1. 
first check batch object @@ -69,10 +69,10 @@ def writeSubmitScript( self, batchFilename, testCase ): ppn = self.ppn if ppn == 0: - raise RuntimeError( """ + raise RuntimeError(""" Unable to find the number of processors per node in BatchGeosatsMoab. Try setting batch_ppn= on the - command line.""" ) + command line.""") # Specifies parallel Lustre file system. gresLine = "" @@ -81,7 +81,7 @@ def writeSubmitScript( self, batchFilename, testCase ): # determine the max number of processors in this job maxprocs = testCase.findMaxNumberOfProcessors() - minNodes = maxprocs / ppn + ( maxprocs % ppn != 0 ) + minNodes = maxprocs / ppn + (maxprocs % ppn != 0) # MSUB options msub_str = '#!/bin/csh' @@ -114,20 +114,20 @@ def writeSubmitScript( self, batchFilename, testCase ): msub_str += f"\n\ncd {testCase.dirnamefull}" # pull out options to construct the command line - action = options.get( "action" ) - checkoption = options.get( "checkoption" ) - configFile = options.get( "configFile" ) - configOverride = options.get( "configOverride" ) - atsFlags = options.get( "atsFlags" ) - geos_atsPath = options.get( "geos_atsPath" ) - machine = options.get( "machine" ) + action = options.get("action") + checkoption = options.get("checkoption") + configFile = options.get("configFile") + configOverride = options.get("configOverride") + atsFlags = options.get("atsFlags") + geos_atsPath = options.get("geos_atsPath") + machine = options.get("machine") # construct the command line msub_str += f'\n{geos_atsPath} -a {action} -c {checkoption}' msub_str += f' -f {configFile} -N {minNodes:d} --machine={machine}' for key, value in configOverride.items(): - if key.startswith( "batch" ): + if key.startswith("batch"): continue msub_str += f' {key}="{value}"' @@ -138,26 +138,26 @@ def writeSubmitScript( self, batchFilename, testCase ): msub_str += f" batch_interactive=True {testCase.name}" # Write and close the file - fc.write( msub_str ) + fc.write(msub_str) fc.close() - def submitBatchScript( self, 
testname, batchFilename ): + def submitBatchScript(self, testname, batchFilename): options = AtsTest.getOptions() - config = options.get( "config", None ) + config = options.get("config", None) if config and config.batch_dryrun: return - p = subprocess.Popen( [ "msub", batchFilename ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) - out = p.communicate()[ 0 ] + p = subprocess.Popen(["msub", batchFilename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + out = p.communicate()[0] if p.returncode: - raise RuntimeError( f"Error submitting {testname} to batch: {out}" ) + raise RuntimeError(f"Error submitting {testname} to batch: {out}") try: - jobid = int( out ) - logger.info( f" Submitting {testname}, jobid = {jobid:d}" ) + jobid = int(out) + logger.info(f" Submitting {testname}, jobid = {jobid:d}") except: err = f"Error submitting {testname} to batch: {out}" - logger.error( err ) - raise RuntimeError( err ) + logger.error(err) + raise RuntimeError(err) diff --git a/geos_ats_package/geos_ats/main.py b/geos_ats_package/geos_ats/main.py index f03b63f..8595327 100644 --- a/geos_ats_package/geos_ats/main.py +++ b/geos_ats_package/geos_ats/main.py @@ -1,6 +1,5 @@ import sys import os -import glob import shutil import signal import subprocess @@ -8,12 +7,12 @@ import logging from geos_ats import command_line_parsers -test_actions = ( "run", "rerun", "check", "continue" ) -report_actions = ( "run", "rerun", "report", "continue" ) +test_actions = ("run", "rerun", "check", "continue") +report_actions = ("run", "rerun", "report", "continue") # Setup the logger -logging.basicConfig( level=logging.DEBUG, format='(%(asctime)s %(module)s:%(lineno)d) %(message)s' ) -logger = logging.getLogger( 'geos_ats' ) +logging.basicConfig(level=logging.DEBUG, format='(%(asctime)s %(module)s:%(lineno)d) %(message)s') +logger = logging.getLogger('geos_ats') # Job records current_subproc = None @@ -21,144 +20,100 @@ geos_atsStartTime = 0 -def check_ats_targets( options, testcases, 
configOverride, args ): - """ - Determine which files, directories, or tests to run. - Handle command line config options. - """ - configOverride[ "executable_path" ] = options.geos_bin_dir - - ats_files = [] - for a in options.ats_targets: - if "=" in a: - key, value = a.split( "=" ) - configOverride[ key ] = value - args.remove( a ) - - elif not options.info: - if os.path.exists( a ): - args.remove( a ) - if os.path.isdir( a ): - newfiles = glob.glob( os.path.join( a, "*.ats" ) ) - ats_files.extend( newfiles ) - else: - ats_files.append( a ) - else: - testcases.append( a ) - else: - if options.action in test_actions: - logger.error( f"The command line arg '{a}' is not recognized." - " An ats file or a directory name is expected." ) - sys.exit( 1 ) - - # If no files were specified, look in the target directories - for d in [ '.', 'integratedTests' ]: - if len( ats_files ) == 0: - if os.path.isdir( d ): - ats_files.extend( glob.glob( os.path.join( d, "*.ats" ) ) ) - - # prune out ats continue files. 
- for a in ats_files[ : ]: - if a.endswith( "continue.ats" ): - ats_files.remove( a ) - - return ats_files - - -def build_ats_arguments( options, ats_files, originalargv, config ): +def build_ats_arguments(options, originalargv, config): # construct the argv to pass to the ATS: atsargv = [] - atsargv.append( originalargv[ 0 ] ) - atsargv.append( "--showGroupStartOnly" ) - atsargv.append( "--logs=%s" % options.logs ) + atsargv.append(originalargv[0]) + atsargv.append("--showGroupStartOnly") + atsargv.append("--logs=%s" % options.logs) if config.batch_interactive: - atsargv.append( "--allInteractive" ) - atsargv.extend( config.machine_options ) + atsargv.append("--allInteractive") + atsargv.extend(config.machine_options) for x in options.ats: # Add the appropriate argument indicators back based on their length - if len( x[ 0 ] ) == 1: - x[ 0 ] = '-' + x[ 0 ] + if len(x[0]) == 1: + x[0] = '-' + x[0] else: - x[ 0 ] = '--' + x[ 0 ] - atsargv.extend( x ) + x[0] = '--' + x[0] + atsargv.extend(x) - for f in os.environ.get( 'ATS_FILTER', '' ).split( ',' ): - atsargv.extend( [ '-f', f ] ) + for f in os.environ.get('ATS_FILTER', '').split(','): + atsargv.extend(['-f', f]) - atsargv.extend( ats_files ) + atsargv.append(options.ats_target) sys.argv = atsargv -def write_log_dir_summary( logdir, originalargv ): +def write_log_dir_summary(logdir, originalargv): from geos_ats import configuration_record - with open( os.path.join( logdir, "geos_ats.config" ), "w" ) as logconfig: - tmp = " ".join( originalargv[ 1: ] ) - logconfig.write( f'Run with: "{tmp}"\n' ) - configuration_record.infoConfigShow( True, logconfig ) + with open(os.path.join(logdir, "geos_ats.config"), "w") as logconfig: + tmp = " ".join(originalargv[1:]) + logconfig.write(f'Run with: "{tmp}"\n') + configuration_record.infoConfigShow(True, logconfig) -def handleShutdown( signum, frame ): +def handleShutdown(signum, frame): if current_jobid is not None: term = "scancel -n %s" % current_jobid - subprocess.call( term, 
shell=True ) - sys.exit( 1 ) + subprocess.call(term, shell=True) + sys.exit(1) -def handle_salloc_relaunch( options, originalargv, configOverride ): +def handle_salloc_relaunch(options, originalargv, configOverride): tests = [ options.action in test_actions, options.salloc, options.machine - in ( "SlurmProcessorScheduled", "GeosAtsSlurmProcessorScheduled" ), "SLURM_JOB_ID" not in os.environ + in ("SlurmProcessorScheduled", "GeosAtsSlurmProcessorScheduled"), "SLURM_JOB_ID" not in os.environ ] - if all( tests ): + if all(tests): if options.sallocOptions != "": - sallocCommand = [ "salloc" ] + options.sallocOptions.split( " " ) + sallocCommand = ["salloc"] + options.sallocOptions.split(" ") else: - sallocCommand = [ "salloc", "-ppdebug", "--exclusive", "-N", "%d" % options.numNodes ] + sallocCommand = ["salloc", "-ppdebug", "--exclusive", "-N", "%d" % options.numNodes] if "testmodifier" in configOverride: - if configOverride[ "testmodifier" ] == "memcheck": - p = subprocess.Popen( [ 'sinfo', '-o', '%l', '-h', '-ppdebug' ], stdout=subprocess.PIPE ) + if configOverride["testmodifier"] == "memcheck": + p = subprocess.Popen(['sinfo', '-o', '%l', '-h', '-ppdebug'], stdout=subprocess.PIPE) out, err = p.communicate() - tarray = out.split( ":" ) + tarray = out.split(":") seconds = tarray.pop() minutes = tarray.pop() hours = 0 days = 0 - if len( tarray ) > 0: + if len(tarray) > 0: hours = tarray.pop() try: - days, hours = hours.split( '-' ) + days, hours = hours.split('-') except ValueError as e: - logger.debug( e ) - limit = min( 360, ( 24 * int( days ) + int( hours ) ) * 60 + int( minutes ) ) - sallocCommand.extend( [ "-t", "%d" % limit ] ) + logger.debug(e) + limit = min(360, (24 * int(days) + int(hours)) * 60 + int(minutes)) + sallocCommand.extend(["-t", "%d" % limit]) # generate a "unique" name for the salloc job so we can remove it later - timeNow = time.strftime( '%H%M%S', time.localtime() ) + timeNow = time.strftime('%H%M%S', time.localtime()) current_jobid = 
"geos_ats_%s" % timeNow # add the name to the arguments (this will override any previous name specification) - sallocCommand.extend( [ "-J", "%s" % current_jobid ] ) + sallocCommand.extend(["-J", "%s" % current_jobid]) # register our signal handler - signal.signal( signal.SIGTERM, handleShutdown ) + signal.signal(signal.SIGTERM, handleShutdown) command = sallocCommand # omit --workingDir on relaunch, as we have already changed directories - relaunchargv = [ x for x in originalargv if not x.startswith( "--workingDir" ) ] + relaunchargv = [x for x in originalargv if not x.startswith("--workingDir")] command += relaunchargv - command += [ "--logs=%s" % options.logs ] - p = subprocess.Popen( command ) + command += ["--logs=%s" % options.logs] + p = subprocess.Popen(command) p.wait() - sys.exit( p.returncode ) + sys.exit(p.returncode) def getLogDirBaseName(): return "TestLogs" -def create_log_directory( options ): +def create_log_directory(options): """ When the action will run tests (e.g. "run", "rerun", "check", "continue", then the LogDir is numbered, and saved. 
When the action does not run @@ -171,193 +126,157 @@ def create_log_directory( options ): basename = getLogDirBaseName() index = 1 while True: - options.logs = "%s.%03d" % ( basename, index ) - if not os.path.exists( options.logs ): + options.logs = "%s.%03d" % (basename, index) + if not os.path.exists(options.logs): break index += 1 # make the options.logs - os.mkdir( options.logs ) + os.mkdir(options.logs) # make symlink try: - if os.path.exists( basename ): - if os.path.islink( basename ): - os.remove( basename ) + if os.path.exists(basename): + if os.path.islink(basename): + os.remove(basename) else: - logger.error( f"unable to replace {basename} with a symlink to {options.logs}" ) + logger.error(f"unable to replace {basename} with a symlink to {options.logs}") - if not os.path.exists( basename ): - os.symlink( options.logs, basename ) + if not os.path.exists(basename): + os.symlink(options.logs, basename) except: - logger.error( "unable to name a symlink to to logdir" ) + logger.error("unable to name a symlink to to logdir") else: if options.action in test_actions: - options.logs = "%s.%s" % ( getLogDirBaseName(), options.action ) + options.logs = "%s.%s" % (getLogDirBaseName(), options.action) elif options.info: - options.logs = "%s.info" % ( getLogDirBaseName() ) + options.logs = "%s.info" % (getLogDirBaseName()) else: - if not os.path.join( options.logs ): - os.mkdir( options.logs ) + if not os.path.join(options.logs): + os.mkdir(options.logs) -def check_timing_file( options, config ): - if options.action in [ "run", "rerun", "continue" ]: +def check_timing_file(options, config): + if options.action in ["run", "rerun", "continue"]: if config.timing_file: - if not os.path.isfile( config.timing_file ): - logger.warning( f'Timing file does not exist {config.timing_file}' ) + if not os.path.isfile(config.timing_file): + logger.warning(f'Timing file does not exist {config.timing_file}') return from geos_ats import configuration_record - with open( 
config.timing_file, "r" ) as filep: + with open(config.timing_file, "r") as filep: for line in filep: - if not line.startswith( '#' ): + if not line.startswith('#'): tokens = line.split() - configuration_record.globalTestTimings[ tokens[ 0 ] ] = int( tokens[ 1 ] ) + configuration_record.globalTestTimings[tokens[0]] = int(tokens[1]) -def append_test_end_step( machine ): - """ - Add extra processing to the end of tests - """ - originalNoteEnd = machine.noteEnd - - def noteEndWrapper( test ): - test.geos_atsTestCase.status.noteEnd( test ) - return originalNoteEnd( test ) - - machine.noteEnd = noteEndWrapper - - -def check_working_dir( workingDir ): - if workingDir: - if os.path.isdir( workingDir ): - os.chdir( workingDir ) - else: - logger.error( f"The requested working dir does not appear to exist: {workingDir}" ) - quit() - - -def infoOptions( title, options ): +def infoOptions(title, options): from geos_ats import common_utilities - topic = common_utilities.InfoTopic( title ) + topic = common_utilities.InfoTopic(title) topic.startBanner() - table = common_utilities.TextTable( 2 ) + table = common_utilities.TextTable(2) for opt, desc in options: - table.addRow( opt, desc ) + table.addRow(opt, desc) table.printTable() topic.endBanner() -def infoParagraph( title, paragraphs ): - from geos_ats import common_utilities - topic = common_utilities.InfoTopic( title ) - topic.startBanner() - table = common_utilities.TextTable( 1 ) - for p in paragraphs: - table.addRow( p ) - table.rowbreak = 1 - table.maxwidth = 75 - table.printTable() - topic.endBanner() +def info(args): + from geos_ats import (common_utilities, configuration_record, test_steps, suite_settings, test_case, test_modifier) + infoLabels = lambda *x: suite_settings.infoLabels(suite_settings.__file__) + infoOwners = lambda *x: suite_settings.infoOwners(suite_settings.__file__) -def info( args ): - from geos_ats import ( common_utilities, configuration_record, test_steps, suite_settings, test_case, - test_modifier 
) - - infoLabels = lambda *x: suite_settings.infoLabels( suite_settings.__file__ ) - infoOwners = lambda *x: suite_settings.infoOwners( suite_settings.__file__ ) - - menu = common_utilities.InfoTopic( "geos_ats info menu" ) - menu.addTopic( "teststep", "Reference on all the TestStep", test_steps.infoTestSteps ) - menu.addTopic( "testcase", "Reference on the TestCase", test_case.infoTestCase ) - menu.addTopic( "labels", "List of labels", infoLabels ) - menu.addTopic( "owners", "List of owners", infoOwners ) - menu.addTopic( "config", "Reference on config options", configuration_record.infoConfig ) - menu.addTopic( "actions", "Description of the command line action options", - lambda *x: infoOptions( "command line actions", command_line_parsers.action_ptions ) ) - menu.addTopic( "checks", "Description of the command line check options", - lambda *x: infoOptions( "command line checks", command_line_parsers.check_options ) ) - menu.addTopic( "modifiers", "List of test modifiers", test_modifier.infoTestModifier ) + menu = common_utilities.InfoTopic("geos_ats info menu") + menu.addTopic("teststep", "Reference on all the TestStep", test_steps.infoTestSteps) + menu.addTopic("testcase", "Reference on the TestCase", test_case.infoTestCase) + menu.addTopic("labels", "List of labels", infoLabels) + menu.addTopic("owners", "List of owners", infoOwners) + menu.addTopic("config", "Reference on config options", configuration_record.infoConfig) + menu.addTopic("actions", "Description of the command line action options", + lambda *x: infoOptions("command line actions", command_line_parsers.action_ptions)) + menu.addTopic("checks", "Description of the command line check options", + lambda *x: infoOptions("command line checks", command_line_parsers.check_options)) + menu.addTopic("modifiers", "List of test modifiers", test_modifier.infoTestModifier) # menu.addTopic("testconfig", "Information on the testconfig.py file", # lambda *x: infoParagraph("testconfig", 
command_line_parsers.test_config_info)) - menu.process( args ) + menu.process(args) -def report( manager ): +def report(manager): """The report action""" - from geos_ats import ( test_case, reporting, configuration_record ) + from geos_ats import (test_case, reporting, configuration_record) testcases = test_case.TESTS.values() if configuration_record.config.report_wait: - reporter = reporting.ReportWait( testcases ) - reporter.report( sys.stdout ) + reporter = reporting.ReportWait(testcases) + reporter.report(sys.stdout) if configuration_record.config.report_text: - reporter = reporting.ReportText( testcases ) - with open( configuration_record.config.report_text_file, "w" ) as filep: - reporter.report( filep ) + reporter = reporting.ReportText(testcases) + with open(configuration_record.config.report_text_file, "w") as filep: + reporter.report(filep) if configuration_record.config.report_text_echo: - with open( configuration_record.config.report_text_file, "r" ) as filep: - sys.stdout.write( filep.read() ) + with open(configuration_record.config.report_text_file, "r") as filep: + sys.stdout.write(filep.read()) if configuration_record.config.report_html: - reporter = reporting.ReportHTML( testcases ) + reporter = reporting.ReportHTML(testcases) reporter.report() if configuration_record.config.report_ini: - reporter = reporting.ReportIni( testcases ) - with open( configuration_record.config.report_ini_file, "w" ) as filep: - reporter.report( filep ) + reporter = reporting.ReportIni(testcases) + with open(configuration_record.config.report_ini_file, "w") as filep: + reporter.report(filep) if configuration_record.config.report_timing: - reporter = reporting.ReportTiming( testcases ) + reporter = reporting.ReportTiming(testcases) if not configuration_record.config.report_timing_overwrite: try: - with open( configuration_record.config.timing_file, "r" ) as filep: - reporter.getOldTiming( filep ) + with open(configuration_record.config.timing_file, "r") as filep: + 
reporter.getOldTiming(filep) except IOError as e: - logger.debug( e ) - with open( configuration_record.config.timing_file, "w" ) as filep: - reporter.report( filep ) + logger.debug(e) + with open(configuration_record.config.timing_file, "w") as filep: + reporter.report(filep) -def summary( manager, alog, short=False ): +def summary(manager, alog, short=False): """Periodic summary and final summary""" - from geos_ats import ( reporting, configuration_record, test_case ) + from geos_ats import (reporting, configuration_record, test_case) - if len( manager.testlist ) == 0: + if len(manager.testlist) == 0: return - if hasattr( manager.machine, "getNumberOfProcessors" ): - totalNumberOfProcessors = getattr( manager.machine, "getNumberOfProcessors", None )() + if hasattr(manager.machine, "getNumberOfProcessors"): + totalNumberOfProcessors = getattr(manager.machine, "getNumberOfProcessors", None)() else: totalNumberOfProcessors = 1 - reporter = reporting.ReportTextPeriodic( manager.testlist ) - reporter.report( geos_atsStartTime, totalNumberOfProcessors ) + reporter = reporting.ReportTextPeriodic(manager.testlist) + reporter.report(geos_atsStartTime, totalNumberOfProcessors) if configuration_record.config.report_html and configuration_record.config.report_html_periodic: testcases = test_case.TESTS.values() - reporter = reporting.ReportHTML( testcases ) - reporter.report( refresh=30 ) + reporter = reporting.ReportHTML(testcases) + reporter.report(refresh=30) if configuration_record.config.report_text: testcases = test_case.TESTS.values() - reporter = reporting.ReportText( testcases ) - with open( configuration_record.config.report_text_file, "w" ) as filep: - reporter.report( filep ) + reporter = reporting.ReportText(testcases) + with open(configuration_record.config.report_text_file, "w") as filep: + reporter.report(filep) -def append_geos_ats_summary( manager ): +def append_geos_ats_summary(manager): initial_summary = manager.summary - def new_summary( *xargs, **kwargs 
): - initial_summary( *xargs, **kwargs ) - summary( manager, None ) + def new_summary(*xargs, **kwargs): + initial_summary(*xargs, **kwargs) + summary(manager, None) manager.summary = new_summary @@ -370,8 +289,8 @@ def main(): # --------------------------------- # Handle command line arguments # --------------------------------- - originalargv = sys.argv[ : ] - options = command_line_parsers.parse_command_line_arguments( originalargv ) + originalargv = sys.argv[:] + options = command_line_parsers.parse_command_line_arguments(originalargv) # Set logging verbosity verbosity_options = { @@ -380,24 +299,24 @@ def main(): 'warning': logging.WARNING, 'error': logging.ERROR } - logger.setLevel( verbosity_options[ options.verbose ] ) + logger.setLevel(verbosity_options[options.verbose]) # Set key environment variables before importing ats from geos_ats import machines search_path = '' if options.machine_dir is not None: - if os.path.isdir( options.machine_dir ): + if os.path.isdir(options.machine_dir): search_path = options.machine_dir else: - logger.error( f'Target machine dir does not exist: {options.machine_dir}' ) - logger.error( 'geos_ats will continue searching in the default path' ) + logger.error(f'Target machine dir does not exist: {options.machine_dir}') + logger.error('geos_ats will continue searching in the default path') if not search_path: - search_path = os.path.dirname( machines.__file__ ) - os.environ[ 'MACHINE_DIR' ] = search_path + search_path = os.path.dirname(machines.__file__) + os.environ['MACHINE_DIR'] = search_path if options.machine: - os.environ[ "MACHINE_TYPE" ] = options.machine + os.environ["MACHINE_TYPE"] = options.machine # --------------------------------- # Setup ATS @@ -405,12 +324,16 @@ def main(): configOverride = {} testcases = [] configFile = '' - check_working_dir( options.workingDir ) - create_log_directory( options ) + + # Setup paths + ats_root_dir = os.path.abspath(os.path.dirname(options.ats_target)) + os.chdir(ats_root_dir) + 
os.makedirs(options.workingDir, exist_ok=True) + create_log_directory(options) # Check the test configuration from geos_ats import configuration_record - configuration_record.initializeConfig( configFile, configOverride, options ) + configuration_record.initializeConfig(configFile, configOverride, options) config = configuration_record.config config.geos_bin_dir = options.geos_bin_dir @@ -418,26 +341,25 @@ def main(): if 'skip_missing' in r: config.restart_skip_missing = True elif 'exclude' in r: - config.restart_exclude_pattern.append( r[ -1 ] ) + config.restart_exclude_pattern.append(r[-1]) # Check the report location if options.logs: - config.report_html_file = os.path.join( options.logs, 'test_results.html' ) - config.report_text_file = os.path.join( options.logs, 'test_results.txt' ) - config.report_ini_file = os.path.join( options.logs, 'test_results.ini' ) + config.report_html_file = os.path.join(options.logs, 'test_results.html') + config.report_text_file = os.path.join(options.logs, 'test_results.txt') + config.report_ini_file = os.path.join(options.logs, 'test_results.ini') - ats_files = check_ats_targets( options, testcases, configOverride, originalargv ) - build_ats_arguments( options, ats_files, originalargv, config ) + build_ats_arguments(options, originalargv, config) # Additional setup tasks - check_timing_file( options, config ) - handle_salloc_relaunch( options, originalargv, configOverride ) + check_timing_file(options, config) + handle_salloc_relaunch(options, originalargv, configOverride) # Print config information - logger.debug( "*" * 80 ) + logger.debug("*" * 80) for notation in config.report_notations: - logger.debug( notation ) - logger.debug( "*" * 80 ) + logger.debug(notation) + logger.debug("*" * 80) # --------------------------------- # Initialize ATS @@ -445,37 +367,40 @@ def main(): geos_atsStartTime = time.time() # Note: the sys.argv is read here by default - import ats # type: ignore[import] + import ats # type: ignore[import] 
ats.manager.init() - logger.debug( 'Copying options to the geos_ats config record file' ) - config.copy_values( ats.manager.machine ) + logger.debug('Copying options to the geos_ats config record file') + config.copy_values(ats.manager.machine) # Glue global values - ats.AtsTest.glue( action=options.action ) - ats.AtsTest.glue( checkoption=options.check ) - ats.AtsTest.glue( configFile=configFile ) - ats.AtsTest.glue( configOverride=configOverride ) - ats.AtsTest.glue( testmode=False ) - ats.AtsTest.glue( atsFlags=options.ats ) - ats.AtsTest.glue( atsFiles=ats_files ) - ats.AtsTest.glue( machine=options.machine ) - ats.AtsTest.glue( config=config ) - if len( testcases ): - ats.AtsTest.glue( testcases=testcases ) + ats.AtsTest.glue(action=options.action) + ats.AtsTest.glue(checkoption=options.check) + ats.AtsTest.glue(configFile=configFile) + ats.AtsTest.glue(configOverride=configOverride) + ats.AtsTest.glue(testmode=False) + ats.AtsTest.glue(workingDir=options.workingDir) + ats.AtsTest.glue(baselineDir=options.baselineDir) + ats.AtsTest.glue(logDir=options.logs) + ats.AtsTest.glue(atsRootDir=ats_root_dir) + ats.AtsTest.glue(atsFlags=options.ats) + ats.AtsTest.glue(atsFiles=options.ats_target) + ats.AtsTest.glue(machine=options.machine) + ats.AtsTest.glue(config=config) + if len(testcases): + ats.AtsTest.glue(testcases=testcases) else: - ats.AtsTest.glue( testcases="all" ) + ats.AtsTest.glue(testcases="all") - from geos_ats import ( common_utilities, suite_settings, test_case, test_steps, user_utilities ) + from geos_ats import (common_utilities, suite_settings, test_case, test_steps) # Set ats options - append_geos_ats_summary( ats.manager ) - append_test_end_step( ats.manager.machine ) + append_geos_ats_summary(ats.manager) ats.manager.machine.naptime = 0.2 ats.log.echo = True # Logging - if options.action in ( "run", "rerun", "check", "continue" ): - write_log_dir_summary( options.logs, originalargv ) + if options.action in ("run", "rerun", "check", "continue"): 
+ write_log_dir_summary(options.logs, originalargv) if options.action in test_actions: ats.manager.firstBanner() @@ -487,41 +412,41 @@ def main(): # Make sure all the testcases requested were found if testcases != "all": - if len( testcases ): - logger.error( f"ERROR: Unknown testcases {str(testcases)}" ) - logger.error( f"ATS files: {str(ats_files)}" ) - sys.exit( 1 ) + if len(testcases): + logger.error(f"ERROR: Unknown testcases {str(testcases)}") + logger.error(f"ATS files: {str(ats_files)}") + sys.exit(1) # Report: if options.action in report_actions: - report( ats.manager ) + report(ats.manager) # clean if options.action == "veryclean": - common_utilities.removeLogDirectories( os.getcwd() ) - files = [ config.report_html_file, config.report_ini_file, config.report_text_file ] + common_utilities.removeLogDirectories(os.getcwd()) + files = [config.report_html_file, config.report_ini_file, config.report_text_file] for f in files: - if os.path.exists( f ): - os.remove( f ) + if os.path.exists(f): + os.remove(f) # clean the temporary logfile that is not needed for certain actions. 
if options.action not in test_actions: if options.logs is not None: - if os.path.exists( options.logs ): - shutil.rmtree( options.logs ) + if os.path.exists(options.logs): + shutil.rmtree(options.logs) # return 0 if all tests passed, 1 otherwise try: if options.failIfTestsFail: - with open( os.path.join( options.logs, "test_results.html" ), 'r' ) as f: - contents = ''.join( f.readlines() ).split( "DETAILED RESULTS" )[ 1 ] + with open(os.path.join(options.logs, "test_results.html"), 'r') as f: + contents = ''.join(f.readlines()).split("DETAILED RESULTS")[1] messages = [ "class=\"red\">FAIL", "class=\"yellow\">SKIPPED", "class=\"reddish\">FAIL", "class=\"yellow\">NOT RUN" ] - result = any( [ m in contents for m in messages ] ) + result = any([m in contents for m in messages]) except IOError as e: - logger.debug( e ) + logger.debug(e) # Other ATS steps not previously included: ats.manager.postprocess() @@ -530,13 +455,13 @@ def main(): ats.manager.finalBanner() # Remove unnecessary log dirs created with clean runs - none_dir = os.path.join( options.workingDir, 'None' ) - if os.path.exists( none_dir ): - shutil.rmtree( none_dir ) + none_dir = os.path.join(options.workingDir, 'None') + if os.path.exists(none_dir): + shutil.rmtree(none_dir) return result if __name__ == "__main__": result = main() - sys.exit( result ) + sys.exit(result) diff --git a/geos_ats_package/geos_ats/reporting.py b/geos_ats_package/geos_ats/reporting.py index 1f9a3bb..5230850 100644 --- a/geos_ats_package/geos_ats/reporting.py +++ b/geos_ats_package/geos_ats/reporting.py @@ -5,12 +5,12 @@ import re from geos_ats.configuration_record import config import sys -import ats # type: ignore[import] +import ats # type: ignore[import] from configparser import ConfigParser import logging # Get the active logger instance -logger = logging.getLogger( 'geos_ats' ) +logger = logging.getLogger('geos_ats') # The following are ALEATS test status values. 
# The order is important for the ReportGroup: lower values take precendence @@ -30,204 +30,218 @@ EXPECTEDFAIL = 13 UNEXPECTEDPASS = 14 +status_map = { + 'FAILRUN': FAILRUN, + 'FAILCHECK': FAILCHECK, + 'FAILCHECKMINOR': FAILCHECKMINOR, + 'TIMEOUT': TIMEOUT, + 'INPROGRESS': INPROGRESS, + 'NOTRUN': NOTRUN, + 'FILTERED': FILTERED, + 'RUNNING': RUNNING, + 'SKIP': SKIP, + 'BATCH': BATCH, + 'FAILRUNOPTIONAL': FAILRUNOPTIONAL, + 'NOTBUILT': NOTBUILT, + 'PASS': PASS, + 'EXPECTEDFAIL': EXPECTEDFAIL, + 'UNEXPECTEDPASS': UNEXPECTEDPASS +} + # A tuple of test status values. -STATUS = ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, FILTERED, RUNNING, - INPROGRESS, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT ) +STATUS = (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, FILTERED, RUNNING, + INPROGRESS, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT) -STATUS_NOTDONE = ( NOTRUN, RUNNING, INPROGRESS, BATCH ) +STATUS_NOTDONE = (NOTRUN, RUNNING, INPROGRESS, BATCH) -class ReportBase( object ): +class ReportBase(object): """Base class for reporting. 
The constructor takes in a sequence of testcases (of type test_case), and from each testcase, a ReportTestCase object is created.""" - def __init__( self, testcases ): + def __init__(self, testcases): pass -class ReportTiming( ReportBase ): +class ReportTiming(ReportBase): """Reporting class that is used for outputting test timings""" - def __init__( self, testcases ): - self.reportcases = [ ReportTestCase( t ) for t in testcases ] + def __init__(self, testcases): + self.reportcases = [ReportTestCase(t) for t in testcases] self.timings = {} - def getOldTiming( self, fp ): + def getOldTiming(self, fp): for line in fp: - if not line.startswith( '#' ): + if not line.startswith('#'): tokens = line.split() - self.timings[ tokens[ 0 ] ] = int( tokens[ 1 ] ) + self.timings[tokens[0]] = int(tokens[1]) - def report( self, fp ): + def report(self, fp): for testcase in self.reportcases: - if testcase.status in [ PASS, TIMEOUT ]: - self.timings[ testcase.testcase.name ] = int( testcase.testcase.status.totalTime() ) + if testcase.status in [PASS, TIMEOUT]: + self.timings[testcase.testcase.name] = int(testcase.testcase.status.totalTime()) output = "" - for key in sorted( self.timings ): - output += "%s %d\n" % ( key, self.timings[ key ] ) - fp.writelines( output ) + for key in sorted(self.timings): + output += "%s %d\n" % (key, self.timings[key]) + fp.writelines(output) -class ReportIni( ReportBase ): +class ReportIni(ReportBase): """Minimal reporting class that is used for bits status emails""" - def __init__( self, testcases ): - self.reportcases = [ ReportTestCase( t ) for t in testcases ] + def __init__(self, testcases): + self.reportcases = [ReportTestCase(t) for t in testcases] # A dictionary where the key is a status, and the value is a sequence of ReportTestCases self.reportcaseResults = {} for status in STATUS: - self.reportcaseResults[ status ] = [ t for t in self.reportcases if t.status == status ] + self.reportcaseResults[status] = [t for t in self.reportcases if 
t.status == status] self.displayName = {} - self.displayName[ FAILRUN ] = "FAILRUN" - self.displayName[ FAILRUNOPTIONAL ] = "FAILRUNOPTIONAL" - self.displayName[ FAILCHECK ] = "FAILCHECK" - self.displayName[ FAILCHECKMINOR ] = "FAILCHECKMINOR" - self.displayName[ TIMEOUT ] = "TIMEOUT" - self.displayName[ NOTRUN ] = "NOTRUN" - self.displayName[ INPROGRESS ] = "INPROGRESS" - self.displayName[ FILTERED ] = "FILTERED" - self.displayName[ RUNNING ] = "RUNNING" - self.displayName[ PASS ] = "PASSED" - self.displayName[ SKIP ] = "SKIPPED" - self.displayName[ BATCH ] = "BATCHED" - self.displayName[ NOTBUILT ] = "NOTBUILT" - self.displayName[ EXPECTEDFAIL ] = "EXPECTEDFAIL" - self.displayName[ UNEXPECTEDPASS ] = "UNEXPECTEDPASS" - - def __getTestCaseName( testcase ): - return testcase.testcase.name - - def report( self, fp ): + self.displayName[FAILRUN] = "FAILRUN" + self.displayName[FAILRUNOPTIONAL] = "FAILRUNOPTIONAL" + self.displayName[FAILCHECK] = "FAILCHECK" + self.displayName[FAILCHECKMINOR] = "FAILCHECKMINOR" + self.displayName[TIMEOUT] = "TIMEOUT" + self.displayName[NOTRUN] = "NOTRUN" + self.displayName[INPROGRESS] = "INPROGRESS" + self.displayName[FILTERED] = "FILTERED" + self.displayName[RUNNING] = "RUNNING" + self.displayName[PASS] = "PASSED" + self.displayName[SKIP] = "SKIPPED" + self.displayName[BATCH] = "BATCHED" + self.displayName[NOTBUILT] = "NOTBUILT" + self.displayName[EXPECTEDFAIL] = "EXPECTEDFAIL" + self.displayName[UNEXPECTEDPASS] = "UNEXPECTEDPASS" + + def report(self, fp): configParser = ConfigParser() - configParser.add_section( "Info" ) - configParser.set( "Info", "Time", time.strftime( "%a, %d %b %Y %H:%M:%S" ) ) + configParser.add_section("Info") + configParser.set("Info", "Time", time.strftime("%a, %d %b %Y %H:%M:%S")) try: platform = socket.gethostname() except: - logger.debug( "Could not get host name" ) + logger.debug("Could not get host name") platform = "unknown" - configParser.set( "Info", "Platform", platform ) + configParser.set("Info", 
"Platform", platform) extraNotations = "" for line in config.report_notations: - line_split = line.split( ":" ) - if len( line_split ) != 2: - line_split = line.split( "=" ) - if len( line_split ) != 2: + line_split = line.split(":") + if len(line_split) != 2: + line_split = line.split("=") + if len(line_split) != 2: extraNotations += "\"" + line.strip() + "\"" continue - configParser.set( "Info", line_split[ 0 ].strip(), line_split[ 1 ].strip() ) + configParser.set("Info", line_split[0].strip(), line_split[1].strip()) if extraNotations != "": - configParser.set( "Info", "Extra Notations", extraNotations ) + configParser.set("Info", "Extra Notations", extraNotations) - configParser.add_section( "Results" ) - configParser.add_section( "Custodians" ) - configParser.add_section( "Documentation" ) + configParser.add_section("Results") + configParser.add_section("Custodians") + configParser.add_section("Documentation") undocumentedTests = [] for status in STATUS: testNames = [] - for reportcaseResult in self.reportcaseResults[ status ]: + for reportcaseResult in self.reportcaseResults[status]: testName = reportcaseResult.testcase.name - testNames.append( testName ) + testNames.append(testName) - owner = getowner( testName, reportcaseResult.testcase ) + owner = getowner(testName, reportcaseResult.testcase) if owner is not None: - configParser.set( "Custodians", testName, owner ) + configParser.set("Custodians", testName, owner) if config.report_doc_link: - linkToDocumentation = os.path.join( config.report_doc_dir, testName, testName + ".html" ) - if os.path.exists( linkToDocumentation ): - configParser.set( "Documentation", testName, linkToDocumentation ) + linkToDocumentation = os.path.join(config.report_doc_dir, testName, testName + ".html") + if os.path.exists(linkToDocumentation): + configParser.set("Documentation", testName, linkToDocumentation) else: if not reportcaseResult.testcase.nodoc: - undocumentedTests.append( testName ) - linkToDocumentation = getowner( 
testName, reportcaseResult.testcase ) - testNames = sorted( testNames ) - configParser.set( "Results", self.displayName[ status ], ";".join( testNames ) ) - undocumentedTests = sorted( undocumentedTests ) - configParser.set( "Documentation", "undocumented", ";".join( undocumentedTests ) ) - configParser.write( fp ) + undocumentedTests.append(testName) + linkToDocumentation = getowner(testName, reportcaseResult.testcase) + testNames = sorted(testNames) + configParser.set("Results", self.displayName[status], ";".join(testNames)) + undocumentedTests = sorted(undocumentedTests) + configParser.set("Documentation", "undocumented", ";".join(undocumentedTests)) + configParser.write(fp) -class ReportText( ReportBase ): +class ReportText(ReportBase): - def __init__( self, testcases ): + def __init__(self, testcases): - ReportBase.__init__( self, testcases ) + ReportBase.__init__(self, testcases) - self.reportcases = [ ReportTestCase( t ) for t in testcases ] + self.reportcases = [ReportTestCase(t) for t in testcases] # A dictionary where the key is a status, and the value is a sequence of ReportTestCases self.reportcaseResults = {} for status in STATUS: - self.reportcaseResults[ status ] = [ t for t in self.reportcases if t.status == status ] + self.reportcaseResults[status] = [t for t in self.reportcases if t.status == status] self.displayName = {} - self.displayName[ FAILRUN ] = "FAIL RUN" - self.displayName[ FAILRUNOPTIONAL ] = "FAIL RUN (OPTIONAL STEP)" - self.displayName[ FAILCHECK ] = "FAIL CHECK" - self.displayName[ FAILCHECKMINOR ] = "FAIL CHECK (MINOR)" - self.displayName[ TIMEOUT ] = "TIMEOUT" - self.displayName[ NOTRUN ] = "NOT RUN" - self.displayName[ INPROGRESS ] = "INPROGRESS" - self.displayName[ FILTERED ] = "FILTERED" - self.displayName[ RUNNING ] = "RUNNING" - self.displayName[ PASS ] = "PASSED" - self.displayName[ SKIP ] = "SKIPPED" - self.displayName[ BATCH ] = "BATCHED" - self.displayName[ NOTBUILT ] = "NOT BUILT" - self.displayName[ EXPECTEDFAIL ] = 
"EXPECTEDFAIL" - self.displayName[ UNEXPECTEDPASS ] = "UNEXPECTEDPASS" - - def report( self, fp ): + self.displayName[FAILRUN] = "FAIL RUN" + self.displayName[FAILRUNOPTIONAL] = "FAIL RUN (OPTIONAL STEP)" + self.displayName[FAILCHECK] = "FAIL CHECK" + self.displayName[FAILCHECKMINOR] = "FAIL CHECK (MINOR)" + self.displayName[TIMEOUT] = "TIMEOUT" + self.displayName[NOTRUN] = "NOT RUN" + self.displayName[INPROGRESS] = "INPROGRESS" + self.displayName[FILTERED] = "FILTERED" + self.displayName[RUNNING] = "RUNNING" + self.displayName[PASS] = "PASSED" + self.displayName[SKIP] = "SKIPPED" + self.displayName[BATCH] = "BATCHED" + self.displayName[NOTBUILT] = "NOT BUILT" + self.displayName[EXPECTEDFAIL] = "EXPECTEDFAIL" + self.displayName[UNEXPECTEDPASS] = "UNEXPECTEDPASS" + + def report(self, fp): """Write out the text report to the give file pointer""" - self.writeSummary( fp, ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, - INPROGRESS, FILTERED, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT ) ) - self.writeLongest( fp, 5 ) - self.writeDetails( fp, - ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, FILTERED ) ) + self.writeSummary(fp, (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, + INPROGRESS, FILTERED, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT)) + self.writeLongest(fp, 5) + self.writeDetails(fp, (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, FILTERED)) - def writeSummary( self, fp, statuses=STATUS ): + def writeSummary(self, fp, statuses=STATUS): """The summary groups each TestCase by its status.""" - fp.write( "=" * 80 ) + fp.write("=" * 80) from geos_ats import common_utilities for status in statuses: - tests = self.reportcaseResults[ status ] - num = len( tests ) - fp.write( f"\n {self.displayName[status]} : {num}" ) + tests = self.reportcaseResults[status] + num = len(tests) + fp.write(f"\n {self.displayName[status]} : {num}") if 
num > 0: testlist = [] for test in tests: testname = test.testcase.name - retries = getattr( test.testcase.atsGroup, "retries", 0 ) + retries = getattr(test.testcase.atsGroup, "retries", 0) if retries > 0: testname += '[retry:%d]' % retries - testlist.append( testname ) - fp.write( f' ( {" ".join( testlist )} ) ' ) + testlist.append(testname) + fp.write(f' ( {" ".join( testlist )} ) ') - def writeDetails( self, - fp, - statuses=( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, INPROGRESS ), - columns=( "Status", "TestCase", "Elapsed", "Resources", "TestStep", "OutFile" ) ): + def writeDetails(self, + fp, + statuses=(FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, INPROGRESS), + columns=("Status", "TestCase", "Elapsed", "Resources", "TestStep", "OutFile")): """This function provides more information about each of the test cases""" from geos_ats import common_utilities - table = common_utilities.TextTable( len( columns ) ) - table.setHeader( *columns ) + table = common_utilities.TextTable(len(columns)) + table.setHeader(*columns) table.rowbreakstyle = "-" printTable = False for status in statuses: - tests = self.reportcaseResults[ status ] + tests = self.reportcaseResults[status] - if len( tests ) == 0: + if len(tests) == 0: continue printTable = True @@ -236,43 +250,43 @@ def writeDetails( self, label = "" pathstr = "" if test.laststep: - paths = testcase.resultPaths( test.laststep ) + paths = testcase.resultPaths(test.laststep) label = test.laststep.label() - pathstr = " ".join( [ os.path.relpath( x ) for x in paths ] ) + pathstr = " ".join([os.path.relpath(x) for x in paths]) row = [] for col in columns: if col == "Status": - statusDisplay = self.displayName[ test.status ] - retries = getattr( testcase.atsGroup, "retries", 0 ) + statusDisplay = self.displayName[test.status] + retries = getattr(testcase.atsGroup, "retries", 0) if retries > 0: statusDisplay += "/retry:%d" % retries - row.append( statusDisplay ) + 
row.append(statusDisplay) elif col == "Directory": - row.append( os.path.relpath( testcase.path ) ) + row.append(os.path.relpath(testcase.path)) elif col == "TestCase": - row.append( testcase.name ) + row.append(testcase.name) elif col == "TestStep": - row.append( label ) + row.append(label) elif col == "OutFile": - row.append( pathstr ) + row.append(pathstr) elif col == "Elapsed": - row.append( ats.times.hms( test.elapsed ) ) + row.append(ats.times.hms(test.elapsed)) elif col == "Resources": - row.append( ats.times.hms( test.resources ) ) + row.append(ats.times.hms(test.resources)) else: - raise RuntimeError( f"Unknown column {col}" ) + raise RuntimeError(f"Unknown column {col}") - table.addRow( *row ) + table.addRow(*row) table.addRowBreak() - fp.write( '\n' ) + fp.write('\n') if printTable: - table.printTable( fp ) - fp.write( '\n' ) + table.printTable(fp) + fp.write('\n') - def writeLongest( self, fp, num=5 ): + def writeLongest(self, fp, num=5): """The longer running tests are reported""" timing = [] @@ -280,39 +294,39 @@ def writeLongest( self, fp, num=5 ): for test in self.reportcases: elapsed = test.elapsed if elapsed > 0: - timing.append( ( elapsed, test ) ) + timing.append((elapsed, test)) - timing = sorted( timing, reverse=True ) + timing = sorted(timing, reverse=True) - if len( timing ) > 0: - fp.write( '\n' ) - fp.write( '\n LONGEST RUNNING TESTS:' ) - for elapsed, test in timing[ :num ]: - fp.write( f" {ats.times.hms(elapsed)} {test.testcase.name}" ) + if len(timing) > 0: + fp.write('\n') + fp.write('\n LONGEST RUNNING TESTS:') + for elapsed, test in timing[:num]: + fp.write(f" {ats.times.hms(elapsed)} {test.testcase.name}") -class ReportTextPeriodic( ReportText ): +class ReportTextPeriodic(ReportText): """This class is used during the periodic reports. It is initialized with the actual ATS tests from the ATS manager object. 
The report inherits from ReportText, and extend that behavior with """ - def __init__( self, atstests ): + def __init__(self, atstests): self.atstest = atstests - testcases = list( set( [ test.geos_atsTestCase for test in atstests ] ) ) - ReportText.__init__( self, testcases ) - - def report( self, startTime, totalProcessors=None ): - self.writeSummary( sys.stdout, - ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, - INPROGRESS, FILTERED, RUNNING, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT ) ) - self.writeUtilization( sys.stdout, startTime, totalProcessors ) - self.writeLongest( sys.stdout ) - self.writeDetails( sys.stdout, ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, RUNNING ), - ( "Status", "TestCase", "Directory", "Elapsed", "Resources", "TestStep" ) ) - - def writeUtilization( self, fp, startTime, totalProcessors=None ): + testcases = list(set([test.geos_atsTestCase for test in atstests])) + ReportText.__init__(self, testcases) + + def report(self, startTime, totalProcessors=None): + self.writeSummary(sys.stdout, + (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, + INPROGRESS, FILTERED, RUNNING, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT)) + self.writeUtilization(sys.stdout, startTime, totalProcessors) + self.writeLongest(sys.stdout) + self.writeDetails(sys.stdout, (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, RUNNING), + ("Status", "TestCase", "Directory", "Elapsed", "Resources", "TestStep")) + + def writeUtilization(self, fp, startTime, totalProcessors=None): """Machine utilization is reported""" totalResourcesUsed = 0.0 totaltime = time.time() - startTime @@ -322,27 +336,27 @@ def writeUtilization( self, fp, startTime, totalProcessors=None ): totalResourcesUsed += resources if totalResourcesUsed > 0: - fp.write( '\n' ) - fp.write( f"\n TOTAL TIME : {ats.times.hms( totaltime )}" ) - fp.write( f"\n TOTAL PROCESSOR-TIME : 
{ats.times.hms(totalResourcesUsed )}" ) + fp.write('\n') + fp.write(f"\n TOTAL TIME : {ats.times.hms( totaltime )}") + fp.write(f"\n TOTAL PROCESSOR-TIME : {ats.times.hms(totalResourcesUsed )}") if totalProcessors: availableResources = totalProcessors * totaltime utilization = totalResourcesUsed / availableResources * 100.0 - fp.write( f" AVAIL PROCESSOR-TIME : {ats.times.hms(availableResources )}" ) - fp.write( f" RESOURCE UTILIZATION : {utilization:5.3g}%" ) + fp.write(f" AVAIL PROCESSOR-TIME : {ats.times.hms(availableResources )}") + fp.write(f" RESOURCE UTILIZATION : {utilization:5.3g}%") -class ReportHTML( ReportBase ): +class ReportHTML(ReportBase): """HTML Reporting""" # only launch a web browser once. launchedBrowser = False - def __init__( self, testcases ): - ReportBase.__init__( self, testcases ) + def __init__(self, testcases): + ReportBase.__init__(self, testcases) - self.reportcases = [ ReportTestCase( t ) for t in testcases ] + self.reportcases = [ReportTestCase(t) for t in testcases] # A dictionary keyed by Status. 
The value is a list of ReportGroup self.groupResults = None @@ -354,125 +368,92 @@ def __init__( self, testcases ): self.initializeReportGroups() self.color = {} - self.color[ FAILRUN ] = "red" - self.color[ FAILRUNOPTIONAL ] = "yellow" - self.color[ FAILCHECK ] = "reddish" - self.color[ FAILCHECKMINOR ] = "reddish" - self.color[ TIMEOUT ] = "reddish" - self.color[ NOTRUN ] = "yellow" - self.color[ INPROGRESS ] = "blue" - self.color[ FILTERED ] = "blueish" - self.color[ RUNNING ] = "orange" - self.color[ PASS ] = "green" - self.color[ SKIP ] = "yellow" - self.color[ BATCH ] = "yellow" - self.color[ NOTBUILT ] = "blueish" - self.color[ EXPECTEDFAIL ] = "green" - self.color[ UNEXPECTEDPASS ] = "red" + self.color[FAILRUN] = "red" + self.color[FAILRUNOPTIONAL] = "yellow" + self.color[FAILCHECK] = "reddish" + self.color[FAILCHECKMINOR] = "reddish" + self.color[TIMEOUT] = "reddish" + self.color[NOTRUN] = "yellow" + self.color[INPROGRESS] = "blue" + self.color[FILTERED] = "blueish" + self.color[RUNNING] = "orange" + self.color[PASS] = "green" + self.color[SKIP] = "yellow" + self.color[BATCH] = "yellow" + self.color[NOTBUILT] = "blueish" + self.color[EXPECTEDFAIL] = "green" + self.color[UNEXPECTEDPASS] = "red" self.displayName = {} - self.displayName[ FAILRUN ] = "FAIL RUN" - self.displayName[ FAILRUNOPTIONAL ] = "FAIL RUN (OPTIONAL STEP)" - self.displayName[ FAILCHECK ] = "FAIL CHECK" - self.displayName[ FAILCHECKMINOR ] = "FAIL CHECK (MINOR)" - self.displayName[ TIMEOUT ] = "TIMEOUT" - self.displayName[ NOTRUN ] = "NOT RUN" - self.displayName[ INPROGRESS ] = "INPROGRESS" - self.displayName[ FILTERED ] = "FILTERED" - self.displayName[ RUNNING ] = "RUNNING" - self.displayName[ PASS ] = "PASSED" - self.displayName[ SKIP ] = "SKIPPED" - self.displayName[ BATCH ] = "BATCHED" - self.displayName[ NOTBUILT ] = "NOTBUILT" - self.displayName[ EXPECTEDFAIL ] = "EXPECTEDFAIL" - self.displayName[ UNEXPECTEDPASS ] = "UNEXPECTEDPASS" + self.displayName[FAILRUN] = "FAIL RUN" + 
self.displayName[FAILRUNOPTIONAL] = "FAIL RUN (OPTIONAL STEP)" + self.displayName[FAILCHECK] = "FAIL CHECK" + self.displayName[FAILCHECKMINOR] = "FAIL CHECK (MINOR)" + self.displayName[TIMEOUT] = "TIMEOUT" + self.displayName[NOTRUN] = "NOT RUN" + self.displayName[INPROGRESS] = "INPROGRESS" + self.displayName[FILTERED] = "FILTERED" + self.displayName[RUNNING] = "RUNNING" + self.displayName[PASS] = "PASSED" + self.displayName[SKIP] = "SKIPPED" + self.displayName[BATCH] = "BATCHED" + self.displayName[NOTBUILT] = "NOTBUILT" + self.displayName[EXPECTEDFAIL] = "EXPECTEDFAIL" + self.displayName[UNEXPECTEDPASS] = "UNEXPECTEDPASS" self.html_filename = config.report_html_file - def initializeReportGroups( self ): + def initializeReportGroups(self): testdir = {} # place testcases into groups for reportcase in self.reportcases: dirname = reportcase.testcase.dirname if dirname not in testdir: - testdir[ dirname ] = [] - testdir[ dirname ].append( reportcase ) + testdir[dirname] = [] + testdir[dirname].append(reportcase) - self.groups = [ ReportGroup( key, value ) for key, value in testdir.items() ] + self.groups = [ReportGroup(key, value) for key, value in testdir.items()] # place groups into a dictionary keyed on the group status self.groupResults = {} for status in STATUS: - self.groupResults[ status ] = [ g for g in self.groups if g.status == status ] + self.groupResults[status] = [g for g in self.groups if g.status == status] - def report( self, refresh=0 ): + def report(self, refresh=0): # potentially regenerate the html documentation for the test suite. 
# # This doesn't seem to work: # self.generateDocumentation() - sp = open( self.html_filename, 'w' ) + sp = open(self.html_filename, 'w') if refresh: - if not any( g.status in ( RUNNING, NOTRUN, INPROGRESS ) for g in self.groups ): + if not any(g.status in (RUNNING, NOTRUN, INPROGRESS) for g in self.groups): refresh = 0 - self.writeHeader( sp, refresh ) - self.writeSummary( sp ) + self.writeHeader(sp, refresh) + self.writeSummary(sp) if config.report_doc_link: - self.writeDoclink( sp ) + self.writeDoclink(sp) # Set the columns to display if config.report_doc_link: - groupColumns = ( "Name", "Custodian", "Status" ) + groupColumns = ("Name", "Custodian", "Status") else: - groupColumns = ( "Name", "Status" ) + groupColumns = ("Name", "Status") - testcaseColumns = ( "Status", "Name", "TestStep", "Age", "Elapsed", "Resources", "Output" ) + testcaseColumns = ("Status", "Name", "TestStep", "Age", "Elapsed", "Resources", "Output") # write the details - self.writeTable( sp, groupColumns, testcaseColumns ) - self.writeFooter( sp ) + self.writeTable(sp, groupColumns, testcaseColumns) + self.writeFooter(sp) sp.close() # launch the browser, if requested. self.browser() - def generateDocumentation( self ): - """Generate the HTML documentation using atddoc""" - if not config.report_doc_link: - return - - testdocfile = os.path.join( config.report_doc_dir, "testdoc.html" ) - if ( os.path.exists( testdocfile ) and not config.report_doc_remake ): - # Check for any atd files newer than the test html documentation - newest = 0 - for root, dirs, files in os.walk( config.report_doc_dir ): - for file in files: - if file.endswith( ".atd" ): - filetime = os.path.getmtime( os.path.join( root, file ) ) - if filetime > newest: - newest = filetime - if os.path.getmtime( testdocfile ) > newest: - logger.info( f"HTML documentation found in {os.path.relpath(testdocfile)}. Not regenerating." ) - return - - logger.info( "Generating HTML documentation files (running 'atddoc')..." 
) - retcode = True - try: - geos_atsdir = os.path.realpath( os.path.dirname( __file__ ) ) - atddoc = os.path.join( geos_atsdir, "atddoc.py" ) - #retcode = subprocess.call( atddoc, cwd=config.report_doc_dir, stdout=subprocess.PIPE) - retcode = subprocess.call( atddoc, cwd=config.report_doc_dir ) - except OSError as e: - logger.debug( e ) - if retcode: - logger.info( f" Failed to create HTML documentation in {config.report_doc_dir}" ) - else: - logger.info( f" HTML documentation created in {config.report_doc_dir}" ) - - def writeRowHeader( self, sp, groupColumns, testcaseColumns ): + def writeRowHeader(self, sp, groupColumns, testcaseColumns): header = f""" """ - sp.write( header ) + sp.write(header) - def writeTable( self, sp, groupColumns, testcaseColumns ): - colspan = len( groupColumns ) + len( testcaseColumns ) + def writeTable(self, sp, groupColumns, testcaseColumns): + colspan = len(groupColumns) + len(testcaseColumns) header = f""" @@ -533,12 +514,12 @@ def writeTable( self, sp, groupColumns, testcaseColumns ): rowcount = 0 testgroups = [] for status in STATUS: - testgroups.extend( self.groupResults[ status ] ) + testgroups.extend(self.groupResults[status]) for test in testgroups: - rowspan = len( test.testcases ) + rowspan = len(test.testcases) if rowcount <= 0: - self.writeRowHeader( sp, groupColumns, testcaseColumns ) + self.writeRowHeader(sp, groupColumns, testcaseColumns) rowcount += 30 rowcount -= rowspan @@ -558,7 +539,7 @@ def writeTable( self, sp, groupColumns, testcaseColumns ): elif col == "Custodian": if config.report_doc_link: - owner = getowner( test.name, test.testcases[ 0 ].testcase ) + owner = getowner(test.name, test.testcases[0].testcase) if owner is not None: header += f'\n ' else: @@ -568,14 +549,14 @@ def writeTable( self, sp, groupColumns, testcaseColumns ): elif col == "Status": header += f'' else: - raise RuntimeError( f"Unknown column {col}" ) + raise RuntimeError(f"Unknown column {col}") for testcase in test.testcases: for col in 
testcaseColumns: if col == "Status": - statusDisplay = self.displayName[ testcase.status ] - retries = getattr( testcase.testcase.atsGroup, "retries", 0 ) + statusDisplay = self.displayName[testcase.status] + retries = getattr(testcase.testcase.atsGroup, "retries", 0) if retries > 0: statusDisplay += "
retry: %d" % retries header += f'\n' @@ -587,34 +568,33 @@ def writeTable( self, sp, groupColumns, testcaseColumns ): if config.report_doc_link: docfound = False # first check for the full problem name, with the domain extension - testhtml = os.path.join( config.report_doc_dir, test.name, - testcase.testcase.name + ".html" ) - if os.path.exists( testhtml ): + testhtml = os.path.join(config.report_doc_dir, test.name, testcase.testcase.name + ".html") + if os.path.exists(testhtml): docfound = True else: # next check for the full problem name without the domain extension - testhtml = os.path.join( config.report_doc_dir, test.name, - testcase.testcase.name + ".html" ) - if os.path.exists( testhtml ): + testhtml = os.path.join(config.report_doc_dir, test.name, + testcase.testcase.name + ".html") + if os.path.exists(testhtml): docfound = True else: # final check for any of the input file names for step in testcase.testcase.steps: - if getattr( step.p, "deck", None ): - [ inputname, suffix ] = getattr( step.p, "deck" ).rsplit( '.', 1 ) - testhtml = os.path.join( config.report_doc_dir, test.name, - inputname + ".html" ) - if os.path.exists( testhtml ): + if getattr(step.p, "deck", None): + [inputname, suffix] = getattr(step.p, "deck").rsplit('.', 1) + testhtml = os.path.join(config.report_doc_dir, test.name, + inputname + ".html") + if os.path.exists(testhtml): # match with the first input file docfound = True break if docfound: - testref = 'href="%s"' % ( testhtml ) + testref = 'href="%s"' % (testhtml) else: if not testcase.testcase.nodoc: testlinksuffix += '
undocumented' - undocumented.append( testcase.testcase.name ) + undocumented.append(testcase.testcase.name) header += f"\n" @@ -631,10 +611,10 @@ def writeTable( self, sp, groupColumns, testcaseColumns ): if testcase.diffage: difftime = testcase.diffage - days = int( difftime ) / 86400 + days = int(difftime) / 86400 if days > 0: difftime -= days * 86400 - hours = int( difftime ) / 3600 + hours = int(difftime) / 3600 if days == 0: # "New" diff file - don't color header += f'\n' @@ -663,17 +643,17 @@ def writeTable( self, sp, groupColumns, testcaseColumns ): header += "\n" else: - raise RuntimeError( f"Unknown column {col}" ) + raise RuntimeError(f"Unknown column {col}") header += '\n' @@ -682,17 +662,17 @@ def writeTable( self, sp, groupColumns, testcaseColumns ): if config.report_doc_link: header += '\n

Undocumented test problems:

' header += '\n\n" - sp.write( header ) + sp.write(header) - def writeHeader( self, sp, refresh ): - gentime = time.strftime( "%a, %d %b %Y %H:%M:%S" ) + def writeHeader(self, sp, refresh): + gentime = time.strftime("%a, %d %b %Y %H:%M:%S") header = """ @@ -765,13 +745,13 @@ def writeHeader( self, sp, refresh ): try: platform = socket.gethostname() except: - logger.debug( "Could not get host name" ) + logger.debug("Could not get host name") platform = "unknown" if os.name == "nt": - username = os.getenv( "USERNAME" ) + username = os.getenv("USERNAME") else: - username = os.getenv( "USER" ) + username = os.getenv("USER") header += f"""

@@ -791,9 +771,9 @@ def writeHeader( self, sp, refresh ):

""" - sp.write( header ) + sp.write(header) - def writeSummary( self, sp ): + def writeSummary(self, sp): summary = """
  {owner} {self.displayName[test.status]}{statusDisplay}{testcase.testcase.name}{testlinksuffix}{hours}h" seen = {} - for stepnum, step in enumerate( testcase.testcase.steps ): - paths = testcase.testcase.resultPaths( step ) + for stepnum, step in enumerate(testcase.testcase.steps): + paths = testcase.testcase.resultPaths(step) for p in paths: # if p has already been accounted for, doesn't exist, or is an empty file, don't print it. - if ( ( ( p in seen ) or not os.path.exists( p ) ) or ( os.stat( p )[ 6 ] == 0 ) ): + if (((p in seen) or not os.path.exists(p)) or (os.stat(p)[6] == 0)): continue header += f"\n{os.path.basename(p)}
" - seen[ p ] = 1 + seen[p] = 1 header += "\n
@@ -806,8 +786,8 @@ def writeSummary( self, sp ): haveRetry = False for status in STATUS: - cases = self.groupResults[ status ] - num = len( cases ) + cases = self.groupResults[status] + num = len(cases) summary += f""" @@ -822,7 +802,7 @@ def writeSummary( self, sp ): caseref = case.name retries = 0 for test in case.testcases: - retries += getattr( test.testcase.atsGroup, "retries", 0 ) + retries += getattr(test.testcase.atsGroup, "retries", 0) if retries > 0: haveRetry = True casename += '*' @@ -838,19 +818,19 @@ def writeSummary( self, sp ): if haveRetry: summary += '\n* indicates that test was retried at least once.' - sp.write( summary ) + sp.write(summary) # Write link to documentation for html - def writeDoclink( self, sp ): + def writeDoclink(self, sp): doc = """

Test problem names with a hyperlink have been documented, the HTML version of which can be viewed by clicking on the link. """ - testdoc = os.path.join( config.report_doc_dir, 'testdoc.html' ) - testsumm = os.path.join( config.report_doc_dir, 'testdoc-summary.txt' ) - if os.path.exists( testdoc ) and os.path.exists( testsumm ): + testdoc = os.path.join(config.report_doc_dir, 'testdoc.html') + testsumm = os.path.join(config.report_doc_dir, 'testdoc-summary.txt') + if os.path.exists(testdoc) and os.path.exists(testsumm): doc += f"""
Or, you can click here for the @@ -860,16 +840,16 @@ def writeDoclink( self, sp ): """ doc += '\n

' - sp.write( doc ) + sp.write(doc) - def writeFooter( self, sp ): + def writeFooter(self, sp): footer = """ """ - sp.write( footer ) + sp.write(footer) - def browser( self ): + def browser(self): if ReportHTML.launchedBrowser: return @@ -878,43 +858,43 @@ def browser( self ): ReportHTML.launchedBrowser = True command = config.browser_command.split() - command.append( "file:%s" % config.report_html_file ) - subprocess.Popen( command ) + command.append("file:%s" % config.report_html_file) + subprocess.Popen(command) -class ReportWait( ReportBase ): +class ReportWait(ReportBase): """This class is used while with the report_wait config option""" - def __init__( self, testcases ): - ReportBase.__init__( self, testcases ) + def __init__(self, testcases): + ReportBase.__init__(self, testcases) self.testcases = testcases - def report( self, fp ): + def report(self, fp): """Write out the text report to the give file pointer""" import time start = time.time() - sleeptime = 60 # interval to check (seconds) + sleeptime = 60 # interval to check (seconds) while True: notdone = [] for t in self.testcases: t.testReport() - report = ReportTestCase( t ) + report = ReportTestCase(t) if report.status in STATUS_NOTDONE: - notdone.append( t ) + notdone.append(t) if notdone: - rr = ReportText( self.testcases ) - rr.writeSummary( sys.stdout, - ( FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, - INPROGRESS, FILTERED, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT ) ) - time.sleep( sleeptime ) + rr = ReportText(self.testcases) + rr.writeSummary(sys.stdout, + (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, + INPROGRESS, FILTERED, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT)) + time.sleep(sleeptime) else: break -class ReportTestCase( object ): +class ReportTestCase(object): """This class represents the outcome from a TestCase. 
It hides differences between off-line reports and the periodic reports (when the actual ATS test object is known). In addition to @@ -922,10 +902,10 @@ class ReportTestCase( object ): that was run, age of the test, the total elapsed time and total resources used.""" - def __init__( self, testcase ): + def __init__(self, testcase): - self.testcase = testcase # test_case - self.status = None # One of the STATUS values (e.g. FAILRUN, PASS, etc.) + self.testcase = testcase # test_case + self.status = None # One of the STATUS values (e.g. FAILRUN, PASS, etc.) self.laststep = None self.diffage = None self.elapsed = 0.0 @@ -939,14 +919,14 @@ def __init__( self, testcase ): if teststatus is None: self.status = NOTRUN return - elif teststatus in ( FILTERED, SKIP ): + elif teststatus in (FILTERED, SKIP): self.status = teststatus return else: - for stepnum, step in enumerate( testcase.steps ): + for stepnum, step in enumerate(testcase.steps): # Get the outcome and related information from the TestStep. 
- outcome, np, startTime, endTime = self._getStepInfo( step ) + outcome, np, startTime, endTime = self._getStepInfo(step) if outcome == "PASS": # So far so good, move on to the next step @@ -960,14 +940,14 @@ def __init__( self, testcase ): self.resources += np * dt outcome = "EXPECTEDFAIL" self.status = EXPECTEDFAIL - break # don't continue past an expected failure + break # don't continue past an expected failure if outcome == "UNEX": dt = endTime - startTime self.elapsed += dt self.resources += np * dt outcome = "UNEXPECTEDPASS" self.status = UNEXPECTEDPASS - break # don't continue past an unexpected pass + break # don't continue past an unexpected pass elif outcome == "SKIP": self.status = SKIP break @@ -1023,9 +1003,9 @@ def __init__( self, testcase ): else: self.status = FAILRUN try: - with open( step.p.stdout, 'r' ) as fp: + with open(step.p.stdout, 'r') as fp: for line in fp: - if re.search( config.report_notbuilt_regexp, line ): + if re.search(config.report_notbuilt_regexp, line): self.status = NOTBUILT break except: @@ -1039,76 +1019,77 @@ def __init__( self, testcase ): # Don't set the laststep, but use it to get the endTime self.status = PASS laststep = step - laststatus = teststatus.findStep( laststep ) - assert ( laststatus ) - self.diffage = now - laststatus[ "endTime" ] + self.diffage = 0.0 + # laststatus = teststatus.findStep(laststep) + # assert (laststatus) + # self.diffage = now - laststatus["endTime"] assert self.status in STATUS - def _getStepInfo( self, teststep ): + def _getStepInfo(self, teststep): """This function hides the differences between the TestStatus files and the information you can get from the ats test object. 
It returns (status, np, startTime, endTime )""" - atsTest = getattr( teststep, "atsTest", None ) + atsTest = getattr(teststep, "atsTest", None) endTime = None startTime = None if atsTest is not None: - status = str( atsTest.status ) - startTime = getattr( atsTest, "startTime", None ) - endTime = getattr( atsTest, "endTime", None ) + status = str(atsTest.status) + startTime = getattr(atsTest, "startTime", None) + endTime = getattr(atsTest, "endTime", None) if status == "PASS" and atsTest.expectedResult == ats.FAILED: status = "FAIL" if status == "FAIL" and atsTest.expectedResult == ats.FAILED: status = "UNEX" else: - stepstatus = self.testcase.status.findStep( teststep ) + stepstatus = self.testcase.status.findStep(teststep) if stepstatus is None: status = "INIT" else: - status = stepstatus[ "result" ] - startTime = stepstatus[ "startTime" ] - endTime = stepstatus[ "endTime" ] + status = stepstatus["result"] + startTime = stepstatus["startTime"] + endTime = stepstatus["endTime"] - np = getattr( teststep.p, "np", 1 ) + np = getattr(teststep.p, "np", 1) - if status in ( "SKIP", "FILT", "INIT", "PASS", "FAIL", "TIME", "EXEC", "BACH", "EXPT", "UNEX" ): - return ( status, np, startTime, endTime ) + if status in ("SKIP", "FILT", "INIT", "PASS", "FAIL", "TIME", "EXEC", "BACH", "EXPT", "UNEX"): + return (status, np, startTime, endTime) else: - return ( "SKIP", np, startTime, endTime ) + return ("SKIP", np, startTime, endTime) -class ReportGroup( object ): +class ReportGroup(object): """A class to represent a group of TestCases. 
Currently, the only grouping done is at the directory level: every testcase in a directory belongs to the same ReportGroup.""" - def __init__( self, groupName, testcases ): + def __init__(self, groupName, testcases): self.name = groupName self.testcases = testcases self.status = NOTRUN if self.testcases: - self.status = min( [ case.status for case in self.testcases ] ) + self.status = min([case.status for case in self.testcases]) assert self.status in STATUS - def __cmp__( self, other ): + def __cmp__(self, other): return self.name == other.name -def getowner( dirname, testcase=None ): +def getowner(dirname, testcase=None): owner = "" if not config.report_doc_link: try: - atdfile = os.path.join( config.report_doc_dir, dirname, dirname + ".atd" ) - with open( atdfile, "r" ) as fp: + atdfile = os.path.join(config.report_doc_dir, dirname, dirname + ".atd") + with open(atdfile, "r") as fp: for line in fp: - match = re.search( "CUSTODIAN:: +(.*)$", line ) + match = re.search("CUSTODIAN:: +(.*)$", line) if not match: - owner = match.group( 1 ) + owner = match.group(1) break except IOError as e: - logger.debug( e ) - if owner == "" and testcase and ( "owner" in testcase.dictionary ): - return testcase.dictionary[ "owner" ] + logger.debug(e) + if owner == "" and testcase and ("owner" in testcase.dictionary): + return testcase.dictionary["owner"] return owner diff --git a/geos_ats_package/geos_ats/rules.py b/geos_ats_package/geos_ats/rules.py index 741b5e2..e69de29 100644 --- a/geos_ats_package/geos_ats/rules.py +++ b/geos_ats_package/geos_ats/rules.py @@ -1,225 +0,0 @@ -#!/bin/env python - -import optparse -import subprocess -import os -import sys -#import glob -import shutil -import logging - -logger = logging.getLogger( 'geos_ats' ) - - -def switch( booleans, i ): - booleans[ i ] = not booleans[ i ] - - -def DeclareCompoundRuleClass( name, RuleA, RuleB ): - """ - Declares a class of name name that is a new rule that is - the combination of 2 base rules. 
- """ - tmp = type( name, ( RuleA, RuleB ), {} ) - tmp.numToggles = RuleA.numToggles + RuleB.numToggles - tmp.numCombinations = RuleA.numCombinations * RuleB.numCombinations - - # Define the initializer for the new class - def newInit( self, toggles ): - RuleA.__init__( self, toggles, 0, RuleA.numToggles ) - RuleB.__init__( self, toggles, RuleA.numToggles ) - - tmp.__init__ = newInit - globals()[ name ] = tmp - return tmp - - -def GenRules( RuleType ): - """ Generator that produces a rule for each possible combination of toggles""" - - nt = RuleType.numToggles - nc = RuleType.numCombinations - """" toggles is [1,2,4,8,16,...] masked by the bitmap of the rulecount. - For example, if nt = 3 (and thus nc = 8), resulting generated toggles are: - [0,0,0] - [1,0,0] - [0,2,0] - [1,2,0] - [0,0,4] - [1,0,4] - [0,2,4] - [1,2,4] - Note that the resulting rule can be uniquely ID'd by the sum of the toggle array. -""" - - for i in range( nc ): - toggles = [ i & pow( 2, x ) for x in range( nt ) ] - tmp = RuleType( toggles ) - tmp.refresh() - yield tmp - - -class Rule( object ): - """ Base class for the rules""" - - def __init__( self, nToggles, nCombinations, toggles ): - self.numToggles = nToggles - self.numCombinations = nCombinations - self.toggles = toggles - self.repStrings = {} - """ Assumes toggles is set in a way consistent with what is done in GenRules""" - self.id = sum( self.toggles ) - self.repStrings[ "@@POS@@" ] = str( self.id ) - - def GetPosition( self ): - return self.id * 1.0 - - def refresh( self ): - pass - - def replaceString( self, string ): - tmp = string - for s in self.repStrings: - tmp = tmp.replace( s, self.repStrings[ s ] ) - return tmp - - def sedFile( self, fIn, fOut ): - inFile = open( fIn ) - outFile = open( fOut, 'w' ) - for line in inFile: - outFile.write( self.replaceString( line ) ) - inFile.close() - outFile.close() - - def checkTimehist( self ): - # timehist - logger.error( 'checkTimehist method not defined' ) - - -class SetupRules( Rule ): 
- numToggles = 2 - numCombinations = pow( 2, numToggles ) - - def __init__( self, toggles, minToggle=0, maxToggle=None ): - self.setupMin = minToggle - self.setupMax = maxToggle - Rule.__init__( self, SetupRules.numToggles, SetupRules.numCombinations, toggles ) - - def refresh( self ): - mtoggles = self.toggles[ self.setupMin:self.setupMax ] - - underscoredName = mtoggles[ 0 ] - self.isTenthCycle = mtoggles[ 1 ] - - self.baseName = "foo%i" % self.id - self.baseName = "%s%s" % ( self.baseName, "_001" if underscoredName else "" ) - self.repStrings[ "@@BASE@@" ] = self.baseName - - self.inputDeck = "%s.in" % self.baseName - self.repStrings[ "@@DECK@@" ] = self.inputDeck - - self.restartBaseName = "%s_001" % self.baseName - self.restartName = "%s_%s" % ( self.restartBaseName, "00010" if self.isTenthCycle else "00000" ) - self.repStrings[ "@@RF@@" ] = self.restartName - - super( SetupRules, self ).refresh() - - def GetInputDeckName( self ): - return self.inputDeck - - def GetInitialRestartName( self ): - return self.restartName - - def GetBaseName( self ): - return self.baseName - - -class CommandLineRules( Rule ): - numToggles = 2 - numCombinations = pow( 2, numToggles ) - - def __init__( self, toggles, minToggle=0, maxToggle=None ): - self.clMin = minToggle - self.clMax = maxToggle - Rule.__init__( self, CommandLineRules.numToggles, CommandLineRules.numCombinations, toggles ) - - def refresh( self ): - mtoggles = self.toggles[ self.clMin:self.clMax ] - self.probDefined = mtoggles[ 0 ] # use the -prob flag - self.restartDefined = mtoggles[ 1 ] # use the -rf flag - - # self.prob = "-prob %s" % "@@BASE@@" if self.probDefined else "" - # self.rf = "-rf %s" % "@@RF@@" if self.restartDefined else "" - self.prob = "@@BASE@@" if self.probDefined else "" - self.rf = "@@RF@@" if self.restartDefined else "" - - self.repStrings[ "@@CL_PROB@@" ] = self.prob - self.repStrings[ "@@CL_RF@@" ] = self.rf - - super( CommandLineRules, self ).refresh() - - -def main(): - - generator = 
GenRules( SetupRules ) - for rule in generator: - vals = ( rule.GetInputDeckName(), rule.GetInitialRestartName(), rule.GetPosition() ) - logger.debug( rule.replaceString( "InputDeck: %s\tRestartFile: %s\tPos: %f" % vals ) ) - - DeclareCompoundRuleClass( "SetupCommand", SetupRules, CommandLineRules ) - logger.debug( SetupCommand.numCombinations ) - generator = GenRules( SetupCommand ) - logger.debug( "compound:" ) - for rule in generator: - vals = ( rule.GetInputDeckName(), rule.GetInitialRestartName(), rule.GetPosition(), rule.prob, rule.rf ) - logger.debug( rule.replaceString( "InputDeck: %s\tRestartFile: %s\tPos: %f\t%s\t%s" % vals ) ) - - return - - dbg = True - parser = optparse.OptionParser() - - # argument to check results of pdldiff script - # parser.add_option("-p", "--pdldiff", type = "string", dest = "pdldiff" ) - ( options, args ) = parser.parse_args() - # assert options.gnuplot - - assert len( args ) == 4 - - base = args[ 0 ] - sourceDeck = args[ 1 ] - atsFile = args[ 2 ] - outdir = args[ 3 ] - assert os.path.exists( sourceDeck ) - assert os.path.exists( atsFile ) - - if os.path.exists( outdir ): - try: - shutil.rmtree( outdir ) - except: - logger.debug( f"Could not remove directory: {outdir}" ) - - # make a directory - try: - os.mkdir( outdir ) - # copy in the input deck and other necessary files for running the problem - shutil.copy( sourceDeck, os.path.join( outdir, "%s.ain" % base ) ) - shutil.copy( "leos1.05.h5", outdir ) - except: - logger.debug( f"Could not create directory: {outdir}" ) - - # copy in the ats file template, replacing appropriate text as we go - outp = open( os.path.join( outdir, "%s.ats" % base ), 'w' ) - inp = open( atsFile, 'r' ) - for line in inp: - line = line.replace( "BASE", base ) - outp.write( line ) - # sub = subprocess.call(['sed', 's/BASE/%s/'%base,atsFile],stdout=outp) - inp.close() - outp.close() - - sys.exit( 0 ) - - -if __name__ == "__main__": - main() diff --git a/geos_ats_package/geos_ats/test_case.py 
b/geos_ats_package/geos_ats/test_case.py index e3141b0..fd9268f 100644 --- a/geos_ats_package/geos_ats/test_case.py +++ b/geos_ats_package/geos_ats/test_case.py @@ -1,10 +1,12 @@ -import ats # type: ignore[import] +import ats # type: ignore[import] import os import sys import shutil import errno import logging import glob +import inspect +from configparser import ConfigParser test = ats.manager.test testif = ats.manager.testif @@ -13,102 +15,112 @@ from geos_ats.common_utilities import Error, Log, InfoTopic, TextTable, removeLogDirectories from geos_ats.configuration_record import config, globalTestTimings from geos_ats import reporting -from geos_ats import test_modifier TESTS = {} BASELINE_PATH = "baselines" -logger = logging.getLogger( 'geos_ats' ) +logger = logging.getLogger('geos_ats') -class Batch( object ): +class Batch(object): """A class to represent batch options""" - def __init__( self, enabled=True, duration="1h", ppn=0, altname=None ): + def __init__(self, enabled=True, duration="1h", ppn=0, altname=None): - if enabled not in ( True, False ): - Error( "enabled must be a boolean" ) + if enabled not in (True, False): + Error("enabled must be a boolean") self.enabled = enabled self.duration = duration try: - dur = ats.Duration( duration ) + dur = ats.Duration(duration) self.durationSeconds = dur.value except ats.AtsError as e: - logger.error( e ) - Error( "bad time specification: %s" % duration ) + logger.error(e) + Error("bad time specification: %s" % duration) - self.ppn = ppn # processor per node - self.altname = altname # alternate name to use when launcing the batch job + self.ppn = ppn # processor per node + self.altname = altname # alternate name to use when launcing the batch job -class TestCase( object ): +class TestCase(object): """Encapsulates one test case, which may include many steps""" - def __init__( self, name, desc, label=None, labels=None, steps=[], **kw ): + def __init__(self, name, desc, label=None, labels=None, steps=[], **kw): try: 
- self.initialize( name, desc, label, labels, steps, **kw ) + self.initialize(name, desc, label, labels, steps, **kw) except Exception as e: # make sure error messages get logged, then get out of here. - logging.error( e ) - Log( str( e ) ) - raise Exception( e ) + logging.error(e) + Log(str(e)) + raise Exception(e) - def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch( enabled=False ), **kw ): + def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch(enabled=False), **kw): self.name = name self.desc = desc self.batch = batch - action = ats.tests.AtsTest.getOptions().get( "action" ) + # Identify the location of the ats test file + ats_root_dir = ats.tests.AtsTest.getOptions().get("atsRootDir") + self.dirname = '' + for s in inspect.stack(): + if ats_root_dir in s.filename: + self.dirname = os.path.dirname(s.filename) + break - if kw.get( "output_directory", False ): - self.path = os.path.abspath( kw.get( "output_directory" ) ) - else: - self.path = os.path.join( os.getcwd(), self.name ) + if not self.dirname: + logger.warning('Could not find the proper test location... 
defaulting to current dir') + self.dirname = os.getcwd() + + # Setup paths + working_relpath = os.path.relpath(self.dirname, ats_root_dir) + working_root = ats.tests.AtsTest.getOptions().get("workingDir") + working_dir = os.path.abspath(os.path.join(working_root, working_relpath, self.name)) - self.dirname = os.path.basename( self.path ) + baseline_relpath = working_relpath + baseline_root = ats.tests.AtsTest.getOptions().get("baselineDir") + baseline_directory = os.path.abspath(os.path.join(baseline_root, baseline_relpath, self.name)) + + self.path = working_relpath try: - os.makedirs( self.path ) + os.makedirs(working_dir, exist_ok=True) except OSError as e: - if e.errno == errno.EEXIST and os.path.isdir( self.path ): - pass - else: - logger.debug( e ) - raise Exception() + logger.debug(e) + raise Exception() + # Setup other parameters self.atsGroup = None self.dictionary = {} - self.dictionary.update( kw ) - self.nodoc = self.dictionary.get( "nodoc", False ) - self.statusFile = os.path.abspath( "TestStatus_%s" % self.name ) + self.dictionary.update(kw) + self.nodoc = self.dictionary.get("nodoc", False) self.status = None - self.outname = os.path.join( self.path, "%s.data" % self.name ) - self.errname = os.path.join( self.path, "%s.err" % self.name ) - self.dictionary[ "name" ] = self.name - self.dictionary[ "output_directory" ] = self.path - self.dictionary[ "baseline_dir" ] = os.path.join( os.getcwd(), BASELINE_PATH, self.dirname ) - self.dictionary[ "testcase_out" ] = self.outname - self.dictionary[ "testcase_err" ] = self.errname - self.dictionary[ "testcase_name" ] = self.name - - # check for test cases, testcases can either be the string - # "all" or a list of full test names. 
- testcases = ats.tests.AtsTest.getOptions().get( "testcases" ) - if testcases == "all": - pass - elif self.name in testcases: - testcases.remove( self.name ) - pass - else: - return + self.outname = f"{self.name}.data" + self.errname = f"{self.name}.err" + self.dictionary["name"] = self.name + self.dictionary["test_directory"] = self.dirname + self.dictionary["output_directory"] = working_dir + self.dictionary["baseline_directory"] = baseline_directory + self.dictionary["testcase_out"] = self.outname + self.dictionary["testcase_err"] = self.errname + self.dictionary["testcase_name"] = self.name + + # Check for previous log information + log_dir = ats.tests.AtsTest.getOptions().get("logDir") + log_file = os.path.join(log_dir, 'test_results.ini') + if os.path.isfile(log_file): + previous_config = ConfigParser() + previous_config.read(log_file) + for k, v in previous_config['Results'].items(): + if self.name in v.split(';'): + self.status = reporting.status_map[k.upper()] if self.name in TESTS: - Error( "Name already in use: %s" % self.name ) + Error("Name already in use: %s" % self.name) - TESTS[ self.name ] = self + TESTS[self.name] = self # check for independent if config.override_np > 0: @@ -117,48 +129,32 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch # number of processors. self.independent = False else: - self.independent = self.dictionary.get( "independent", False ) - if self.independent not in ( True, False ): - Error( "independent must be either True or False: %s" % str( self.independent ) ) + self.independent = self.dictionary.get("independent", False) + if self.independent not in (True, False): + Error("independent must be either True or False: %s" % str(self.independent)) # check for depends - self.depends = self.dictionary.get( "depends", None ) + self.depends = self.dictionary.get("depends", None) if self.depends == self.name: # This check avoid testcases depending on themselves. 
self.depends = None - self.handleLabels( label, labels ) + self.handleLabels(label, labels) # complete the steps. # 1. update the steps with data from the dictionary # 2. substeps are inserted into the list of steps (the steps are flattened) for step in steps: - step.update( self.dictionary ) + step.update(self.dictionary) self.steps = [] for step in steps: - step.insertStep( self.steps ) - - # test modifier - modifier = test_modifier.Factory( config.testmodifier ) - newSteps = modifier.modifySteps( self.steps, self.dictionary ) - if newSteps: - # insert the modified steps, including any extra steps that may have - # been added by the modifier. - self.steps = [] - for step in newSteps: - step.insertStep( self.steps ) - for extraStep in step.extraSteps: - extraStep.insertStep( newSteps ) - self.steps = newSteps - else: - Log( "# SKIP test=%s : testmodifier=%s" % ( self.name, config.testmodifier ) ) - self.status = reporting.SKIP - return + step.insertStep(self.steps) # Check for explicit skip flag - if action in ( "run", "rerun", "continue" ): - if self.dictionary.get( "skip", None ): + action = ats.tests.AtsTest.getOptions().get("action") + if action in ("run", "rerun", "continue"): + if self.dictionary.get("skip", None): self.status = reporting.SKIP return @@ -166,7 +162,7 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch npMax = self.findMaxNumberOfProcessors() if config.filter_maxprocessors != -1: if npMax > config.filter_maxprocessors: - Log( "# FILTER test=%s : max processors(%d > %d)" % ( self.name, npMax, config.filter_maxprocessors ) ) + Log("# FILTER test=%s : max processors(%d > %d)" % (self.name, npMax, config.filter_maxprocessors)) self.status = reporting.FILTERED return @@ -174,89 +170,81 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch ngpuMax = self.findMaxNumberOfGPUs() # filter based on not enough resources - if action in ( "run", "rerun", "continue" ): + if action in ("run", 
"rerun", "continue"): tests = [ - not ats.tests.AtsTest.getOptions().get( "testmode" ), not self.batch.enabled, - hasattr( ats.manager.machine, "getNumberOfProcessors" ) + not ats.tests.AtsTest.getOptions().get("testmode"), not self.batch.enabled, + hasattr(ats.manager.machine, "getNumberOfProcessors") ] - if all( tests ): + if all(tests): - totalNumberOfProcessors = getattr( ats.manager.machine, "getNumberOfProcessors" )() + totalNumberOfProcessors = getattr(ats.manager.machine, "getNumberOfProcessors")() if npMax > totalNumberOfProcessors: - Log( "# SKIP test=%s : not enough processors to run (%d > %d)" % - ( self.name, npMax, totalNumberOfProcessors ) ) + Log("# SKIP test=%s : not enough processors to run (%d > %d)" % + (self.name, npMax, totalNumberOfProcessors)) self.status = reporting.SKIP return # If the machine doesn't specify a number of GPUs then it has none. - totalNumberOfGPUs = getattr( ats.manager.machine, "getNumberOfGPUS", lambda: 1e90 )() + totalNumberOfGPUs = getattr(ats.manager.machine, "getNumberOfGPUS", lambda: 1e90)() if ngpuMax > totalNumberOfGPUs: - Log( "# SKIP test=%s : not enough gpus to run (%d > %d)" % - ( self.name, ngpuMax, totalNumberOfGPUs ) ) + Log("# SKIP test=%s : not enough gpus to run (%d > %d)" % (self.name, ngpuMax, totalNumberOfGPUs)) self.status = reporting.SKIP return # filtering test steps based on action - if action in ( "run", "rerun", "continue" ): - checkoption = ats.tests.AtsTest.getOptions().get( "checkoption" ) + if action in ("run", "rerun", "continue"): + checkoption = ats.tests.AtsTest.getOptions().get("checkoption") if checkoption == "none": - self.steps = [ step for step in self.steps if not step.isCheck() ] + self.steps = [step for step in self.steps if not step.isCheck()] elif action == "check": - self.steps = [ step for step in self.steps if step.isCheck() ] + self.steps = [step for step in self.steps if step.isCheck()] # move all the delayed steps to the end reorderedSteps = [] for step in self.steps: if 
not step.isDelayed(): - reorderedSteps.append( step ) + reorderedSteps.append(step) for step in self.steps: if step.isDelayed(): - reorderedSteps.append( step ) + reorderedSteps.append(step) self.steps = reorderedSteps # filter based on previous results: - if action in ( "run", "check", "continue" ): - # read the status file - self.status = test_caseStatus( self ) - + if action in ("run", "check", "continue"): # if previously passed then skip - if self.status.isPassed(): - Log( "# SKIP test=%s (previously passed)" % ( self.name ) ) + if self.status == reporting.PASS: + Log("# SKIP test=%s (previously passed)" % (self.name)) # don't set status here, as we want the report to reflect the pass return if action == "continue": - if self.status.isFailed(): - Log( "# SKIP test=%s (previously failed)" % ( self.name ) ) + if self.status == reporting.FAILED: + Log("# SKIP test=%s (previously failed)" % (self.name)) # don't set status here, as we want the report to reflect the pass return # Perform the action: - if action in ( "run", "continue" ): - Log( "# run test=%s" % ( self.name ) ) + if action in ("run", "continue"): + Log("# run test=%s" % (self.name)) self.testCreate() elif action == "rerun": - Log( "# rerun test=%s" % ( self.name ) ) + Log("# rerun test=%s" % (self.name)) self.testCreate() elif action == "check": - Log( "# check test=%s" % ( self.name ) ) + Log("# check test=%s" % (self.name)) self.testCreate() elif action == "commands": self.testCommands() - elif action == "reset": - if self.testReset(): - Log( "# reset test=%s" % ( self.name ) ) - elif action == "clean": - Log( "# clean test=%s" % ( self.name ) ) + Log("# clean test=%s" % (self.name)) self.testClean() elif action == "veryclean": - Log( "# veryclean test=%s" % ( self.name ) ) + Log("# veryclean test=%s" % (self.name)) self.testVeryClean() elif action == "rebaseline": @@ -268,162 +256,148 @@ def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch elif action == "list": 
self.testList() - elif action in ( "report" ): - self.testReport() - else: - Error( "Unknown action?? %s" % action ) + Error("Unknown action?? %s" % action) - def resultPaths( self, step=None ): + def resultPaths(self, step=None): """Return the paths to output files for the testcase. Used in reporting""" - paths = [ self.outname, self.errname ] + paths = [self.outname, self.errname] if step: for x in step.resultPaths(): - fullpath = os.path.join( self.path, x ) - if os.path.exists( fullpath ): - paths.append( fullpath ) + fullpath = os.path.join(self.path, x) + if os.path.exists(fullpath): + paths.append(fullpath) return paths - def testReset( self ): - self.status = test_caseStatus( self ) - ret = self.status.resetFailed() - self.status.writeStatusFile() - return ret - - def testClean( self ): - if os.path.exists( self.statusFile ): - os.remove( self.statusFile ) - if os.path.exists( self.outname ): - os.remove( self.outname ) - if os.path.exists( self.errname ): - os.remove( self.errname ) + def testClean(self): + if os.path.exists(self.outname): + os.remove(self.outname) + if os.path.exists(self.errname): + os.remove(self.errname) for step in self.steps: step.clean() - def testVeryClean( self ): + def testVeryClean(self): - def _remove( path ): - delpaths = glob.glob( path ) + def _remove(path): + delpaths = glob.glob(path) for p in delpaths: - if os.path.exists( p ): + if os.path.exists(p): try: - if os.path.isdir( p ): - shutil.rmtree( p ) + if os.path.isdir(p): + shutil.rmtree(p) else: - os.remove( p ) + os.remove(p) except OSError: - pass # so that two simultaneous clean operations don't fail + pass # so that two simultaneous clean operations don't fail # clean self.testClean() # remove log directories - removeLogDirectories( os.getcwd() ) + removeLogDirectories(os.getcwd()) # remove extra files - if len( self.steps ) > 0: - _remove( config.report_html_file ) - _remove( config.report_text_file ) - _remove( self.path ) - _remove( "*.core" ) - _remove( "core" 
) - _remove( "core.*" ) - _remove( "vgcore.*" ) - _remove( "*.btr" ) - _remove( "TestLogs*" ) - _remove( "*.ini" ) - - def findMaxNumberOfProcessors( self ): + if len(self.steps) > 0: + _remove(config.report_html_file) + _remove(config.report_text_file) + _remove(self.path) + _remove("*.core") + _remove("core") + _remove("core.*") + _remove("vgcore.*") + _remove("*.btr") + _remove("TestLogs*") + _remove("*.ini") + + def findMaxNumberOfProcessors(self): npMax = 1 for step in self.steps: - np = getattr( step.p, "np", 1 ) - npMax = max( np, npMax ) + np = getattr(step.p, "np", 1) + npMax = max(np, npMax) return npMax - def findMaxNumberOfGPUs( self ): + def findMaxNumberOfGPUs(self): gpuMax = 0 for step in self.steps: - ngpu = getattr( step.p, "ngpu", 0 ) * getattr( step.p, "np", 1 ) - gpuMax = max( ngpu, gpuMax ) + ngpu = getattr(step.p, "ngpu", 0) * getattr(step.p, "np", 1) + gpuMax = max(ngpu, gpuMax) return gpuMax - def testCreate( self ): + def testCreate(self): atsTest = None - keep = ats.tests.AtsTest.getOptions().get( "keep" ) + keep = ats.tests.AtsTest.getOptions().get("keep") # remove outname - if os.path.exists( self.outname ): - os.remove( self.outname ) - if os.path.exists( self.errname ): - os.remove( self.errname ) - - # create the status file - if self.status is None: - self.status = test_caseStatus( self ) + if os.path.exists(self.outname): + os.remove(self.outname) + if os.path.exists(self.errname): + os.remove(self.errname) maxnp = 1 - for stepnum, step in enumerate( self.steps ): - np = getattr( step.p, "np", 1 ) - maxnp = max( np, maxnp ) + for stepnum, step in enumerate(self.steps): + np = getattr(step.p, "np", 1) + maxnp = max(np, maxnp) if config.priority == "processors": priority = maxnp elif config.priority == "timing": - priority = max( globalTestTimings.get( self.name, 1 ) * maxnp, 1 ) + priority = max(globalTestTimings.get(self.name, 1) * maxnp, 1) else: priority = 1 # start a group - ats.tests.AtsTest.newGroup( priority=priority ) + 
ats.tests.AtsTest.newGroup(priority=priority) # keep a reference to the ats test group self.atsGroup = ats.tests.AtsTest.group # if depends if self.depends: - priorTestCase = TESTS.get( self.depends, None ) + priorTestCase = TESTS.get(self.depends, None) if priorTestCase is None: - Log( "Warning: Test %s depends on testcase %s, which is not scheduled to run" % - ( self.name, self.depends ) ) + Log("Warning: Test %s depends on testcase %s, which is not scheduled to run" % + (self.name, self.depends)) else: if priorTestCase.steps: - atsTest = getattr( priorTestCase.steps[ -1 ], "atsTest", None ) + atsTest = getattr(priorTestCase.steps[-1], "atsTest", None) - for stepnum, step in enumerate( self.steps ): + for stepnum, step in enumerate(self.steps): - np = getattr( step.p, "np", 1 ) - ngpu = getattr( step.p, "ngpu", 0 ) + np = getattr(step.p, "np", 1) + ngpu = getattr(step.p, "ngpu", 0) executable = step.executable() args = step.makeArgs() # set the label - label = "%s/%s_%d_%s" % ( self.dirname, self.name, stepnum + 1, step.label() ) + # label = "%s/%s_%d_%s" % (self.dirname, self.name, stepnum + 1, step.label()) + label = "%s_%d_%s" % (self.name, stepnum + 1, step.label()) # call either 'test' or 'testif' if atsTest is None: - func = lambda *a, **k: test( *a, **k ) + func = lambda *a, **k: test(*a, **k) else: - func = lambda *a, **k: testif( atsTest, *a, **k ) + func = lambda *a, **k: testif(atsTest, *a, **k) # timelimit kw = {} if self.batch.enabled: - kw[ "timelimit" ] = self.batch.duration + kw["timelimit"] = self.batch.duration - if ( step.timelimit() and not config.override_timelimit ): - kw[ "timelimit" ] = step.timelimit() + if (step.timelimit() and not config.override_timelimit): + kw["timelimit"] = step.timelimit() else: - kw[ "timelimit" ] = config.default_timelimit - - atsTest = func( executable=executable, - clas=args, - np=np, - ngpu=ngpu, - label=label, - serial=( not step.useMPI() and not config.script_launch ), - independent=self.independent, - 
batch=self.batch.enabled, - **kw ) + kw["timelimit"] = config.default_timelimit + + atsTest = func(executable=executable, + clas=args, + np=np, + ngpu=ngpu, + label=label, + serial=(not step.useMPI() and not config.script_launch), + independent=self.independent, + batch=self.batch.enabled, + **kw) # ats test gets a reference to the TestStep and the TestCase atsTest.geos_atsTestCase = self @@ -432,60 +406,53 @@ def testCreate( self ): # TestStep gets a reference to the atsTest step.atsTest = atsTest - # Add the step the test status object - self.status.addStep( atsTest ) - # set the expected result if step.expectedResult() == "FAIL" or step.expectedResult() is False: atsTest.expectedResult = ats.FAILED # The ATS does not permit tests to depend on failed tests. # therefore we need to break here - self.steps = self.steps[ :stepnum + 1 ] + self.steps = self.steps[:stepnum + 1] break # end the group ats.tests.AtsTest.endGroup() - self.status.resetFailed() - self.status.writeStatusFile() - - def commandLine( self, step ): + def commandLine(self, step): args = [] executable = step.executable() commandArgs = step.makeArgs() - assert isinstance( commandArgs, list ) + assert isinstance(commandArgs, list) for a in commandArgs: if " " in a: - args.append( '"%s"' % a ) + args.append('"%s"' % a) else: - args.append( a ) + args.append(a) - argsstr = " ".join( args ) + argsstr = " ".join(args) return executable + " " + argsstr - def testCommands( self ): - Log( "\n# commands test=%s" % ( self.name ) ) + def testCommands(self): + Log("\n# commands test=%s" % (self.name)) for step in self.steps: - np = getattr( step.p, "np", 1 ) + np = getattr(step.p, "np", 1) usempi = step.useMPI() - stdout = getattr( step.p, "stdout", None ) - commandline = self.commandLine( step ).replace( '%%', '%' ) + stdout = getattr(step.p, "stdout", None) + commandline = self.commandLine(step).replace('%%', '%') if stdout: - Log( "np=%d %s > %s" % ( np, commandline, stdout ) ) + Log("np=%d %s > %s" % (np, 
commandline, stdout)) else: - Log( "np=%d %s" % ( np, commandline ) ) + Log("np=%d %s" % (np, commandline)) - def testRebaseline( self ): + def testRebaseline(self): rebaseline = True if config.rebaseline_ask: while 1: if config.rebaseline_undo: - logger.info( f"Are you sure you want to undo the rebaseline for TestCase '{self.name}'?", - flush=True ) + logger.info(f"Are you sure you want to undo the rebaseline for TestCase '{self.name}'?", flush=True) else: - logger.info( f"Are you sure you want to rebaseline TestCase '{self.name}'?", flush=True ) + logger.info(f"Are you sure you want to rebaseline TestCase '{self.name}'?", flush=True) - x = input( '[y/n] ' ) + x = input('[y/n] ') x = x.strip() if x == "y": break @@ -493,31 +460,27 @@ def testRebaseline( self ): rebaseline = False break else: - Log( "\n# rebaseline test=%s" % ( self.name ) ) + Log("\n# rebaseline test=%s" % (self.name)) if rebaseline: for step in self.steps: step.rebaseline() - def testRebaselineFailed( self ): + def testRebaselineFailed(self): config.rebaseline_ask = False - self.status = test_caseStatus( self ) - if self.status.isFailed(): + if self.status == reporting.FAILED: self.testRebaseline() - def testList( self ): - Log( "# test=%s : labels=%s" % ( self.name.ljust( 32 ), " ".join( self.labels ) ) ) + def testList(self): + Log("# test=%s : labels=%s" % (self.name.ljust(32), " ".join(self.labels))) - def testReport( self ): - self.status = test_caseStatus( self ) - - def handleLabels( self, label, labels ): + def handleLabels(self, label, labels): """set the labels, and verify they are known to the system, the avoid typos""" if labels is not None and label is not None: - Error( "specify only one of 'label' or 'labels'" ) + Error("specify only one of 'label' or 'labels'") if label is not None: - self.labels = [ label ] + self.labels = [label] elif labels is not None: self.labels = labels else: @@ -525,207 +488,9 @@ def handleLabels( self, label, labels ): for x in self.labels: if x not in 
testLabels: - Error( f"unknown label {x}. run 'geos_ats -i labels' for a list" ) - - -class test_caseStatus( object ): - - def __init__( self, testCase ): - self.testCase = testCase - self.statusFile = self.testCase.statusFile - self.readStatusFile() - - def readStatusFile( self ): - if os.path.exists( self.statusFile ): - f = open( self.statusFile, "r" ) - self.status = [ eval( x.strip() ) for x in f.readlines() ] - f.close() - else: - self.status = [] - - def writeStatusFile( self ): - assert self.status is not None - - with open( self.statusFile, "w" ) as f: - f.writelines( [ str( s ) + '\n' for s in self.status ] ) - - def testKey( self, step ): - np = getattr( step.p, "np", 1 ) - key = str( ( np, step.label(), step.executable(), step.makeArgsForStatusKey() ) ) - return key - - def testData( self, test ): - key = self.testKey( test.geos_atsTestStep ) - result = test.status - - if result == ats.PASSED and test.expectedResult == ats.FAILED: - result = ats.FAILED - endTime = getattr( test, "endTime", None ) - startTime = getattr( test, "startTime", None ) - data = {} - data[ "key" ] = key - data[ "result" ] = str( result ) - data[ "startTime" ] = startTime - data[ "endTime" ] = endTime - return key, data - - def findStep( self, step ): - key = self.testKey( step ) - for s in self.status: - if key in s[ "key" ]: - return s - - return None - - def isPassed( self ): - for step in self.testCase.steps: - status = self.findStep( step ) - if status: - if status[ "result" ] == "EXPT": - # do not continue after an expected fail - return True - elif status[ "result" ] == "PASS": - continue - else: - return False - else: - return False - return True - - def isFailed( self ): - for step in self.testCase.steps: - status = self.findStep( step ) - if status: - if status[ "result" ] == "EXPT": - # do not continue after an expected fail - return False - elif status[ "result" ] == "PASS": - continue - elif status[ "result" ] == "FAIL": - return True - else: - return False - else: - 
return False - return False - - def resetFailed( self ): - ret = False - for step in self.testCase.steps: - status = self.findStep( step ) - if status: - if status[ "result" ] == "EXPT": - # do not continue after an expected fail - status[ "result" ] = "INIT" - ret = True - elif status[ "result" ] == "FAIL": - status[ "result" ] = "INIT" - ret = True - else: - continue - return ret - - def totalTime( self ): - total = 0.0 - for step in self.testCase.steps: - status = self.findStep( step ) - if status: - steptime = status[ "endTime" ] - status[ "startTime" ] - assert steptime >= 0 - total += steptime - return total - - def addStep( self, test ): - key, data = self.testData( test ) - found = False - for s in self.status: - if key == s[ "key" ]: - found = True - break - - if not found: - self.status.append( data ) - - def noteEnd( self, test ): - """Update the TestStatus file for this test case""" - # update the status - key, data = self.testData( test ) - - self.readStatusFile() - found = False - for i, s in enumerate( self.status ): - if key in s[ "key" ]: - self.status[ i ] = data - found = True - break - - if not found: - logger.warning( f"NOT FOUND: {key} {self.statusFile}" ) - assert found - self.writeStatusFile() - - # append to stdout/stderr file - for stream in ( "outname", "errname" ): - sourceFile = getattr( test, stream ) - dataFile = getattr( self.testCase, stream ) - - if not os.path.exists( sourceFile ): - continue - - # Append to the TestCase files - f1 = open( dataFile, "a" ) - f2 = open( sourceFile, "r" ) - f1.write( ":" * 20 + "\n" ) - f1.write( self.testCase.commandLine( test.geos_atsTestStep ) + "\n" ) - f1.write( ":" * 20 + "\n" ) - f1.write( f2.read() ) - f1.close() - f2.close() - - # Copy the stdout or stderr, if requested - if stream == "outname": - destFile = test.geos_atsTestStep.saveOut() - else: - destFile = test.geos_atsTestStep.saveErr() - - if destFile: - destFile = os.path.join( self.testCase.path, destFile ) - shutil.copy( sourceFile, 
destFile ) - - # If this is the last step (and it passed), clean the temporary files - if config.clean_on_pass: - lastStep = ( test.geos_atsTestStep is self.testCase.steps[ -1 ] ) - if lastStep and self.isPassed(): - for step in self.testCase.steps: - step.clean() - - -def infoTestCase( *args ): - """This function is used to print documentation about the testcase""" - - topic = InfoTopic( "testcase" ) - topic.startBanner() - - logger.info( "Required parameters" ) - table = TextTable( 3 ) - table.addRow( "name", "required", "The name of the test problem" ) - table.addRow( "desc", "required", "A brief description" ) - table.addRow( "label", "required", "A string or sequence of strings to tag the TestCase. See info topic 'labels'" ) - table.addRow( "owner", "optional", - "A string or sequence of strings of test owners for this TestCase. See info topic 'owners'" ) - table.addRow( - "batch", "optional", "A Batch object. Batch(enabled=True, duration='1h', ppn=0, altname=None)." - " ppn is short for processors per node (0 means to use the global default)." - " altname will be used for the batch job's name if supplied, otherwise the full name of the test case is used." - ), - table.addRow( "depends", "optional", "The name of a testcase that this testcase depends" ) - table.addRow( "steps", "required", "A sequence of TestSteps objects. See info topic 'teststeps'" ) - - table.printTable() - - topic.endBanner() + Error(f"unknown label {x}. 
run 'geos_ats -i labels' for a list") # Make available to the tests -ats.manager.define( TestCase=TestCase ) -ats.manager.define( Batch=Batch ) +ats.manager.define(TestCase=TestCase) +ats.manager.define(Batch=Batch) diff --git a/geos_ats_package/geos_ats/test_steps.py b/geos_ats_package/geos_ats/test_steps.py index 57d39ac..671465f 100644 --- a/geos_ats_package/geos_ats/test_steps.py +++ b/geos_ats_package/geos_ats/test_steps.py @@ -1,5 +1,5 @@ import os -import ats # type: ignore[import] +import ats # type: ignore[import] import glob import shutil import sys @@ -11,10 +11,10 @@ from geos_ats.common_utilities import Error, Log from geos_ats.configuration_record import config -logger = logging.getLogger( 'geos_ats' ) +logger = logging.getLogger('geos_ats') -def getGeosProblemName( deck, name ): +def getGeosProblemName(deck, name): """ Given an input deck and a name return the prefix Geos will attatch to it's output files. @@ -22,15 +22,15 @@ def getGeosProblemName( deck, name ): NAME [in]: The name given to Geos on the command line. """ if name is None: - if deck.endswith( ".xml" ): - return os.path.basename( deck )[ :-4 ] + if deck.endswith(".xml"): + return os.path.basename(deck)[:-4] else: - return os.path.basename( deck ) + return os.path.basename(deck) else: return name -def findMaxMatchingFile( file_path ): +def findMaxMatchingFile(file_path): """ Given a path FILE_PATH where the base name of FILE_PATH is treated as a regular expression find and return the path of the greatest matching file/folder or None if no match is found. @@ -43,31 +43,31 @@ def findMaxMatchingFile( file_path ): "test/plot_*.hdf5" will return the file with the greatest name in the ./test directory that begins with "plot_" and ends with ".hdf5". """ - file_directory, pattern = os.path.split( file_path ) + file_directory, pattern = os.path.split(file_path) if file_directory == "": file_directory = "." 
- if not os.path.isdir( file_directory ): + if not os.path.isdir(file_directory): return None max_match = '' - pattern = re.compile( pattern ) - for file in os.listdir( file_directory ): - if pattern.match( file ) is not None: - max_match = max( file, max_match ) + pattern = re.compile(pattern) + for file in os.listdir(file_directory): + if pattern.match(file) is not None: + max_match = max(file, max_match) if not max_match: return None - return os.path.join( file_directory, max_match ) + return os.path.join(file_directory, max_match) -class TestParam( object ): +class TestParam(object): """ A class that describes a parameter of a test step. """ - def __init__( self, name, doc, default=None ): + def __init__(self, name, doc, default=None): self.name = name self.doc = doc self.default = default @@ -78,7 +78,7 @@ def __init__( self, name, doc, default=None ): ################################################################################ -class TestStepBase( object ): +class TestStepBase(object): """ The base clase for a test step. """ @@ -87,65 +87,67 @@ class TestStepBase( object ): TestParam( "clean", "additional files to remove during the clean action." " clean may be a string or a list of strings. The strings may contain" - " wildcard characters." ), + " wildcard characters."), TestParam( "timelimit", "maximum time the step is allowed to run before it is considerend a TIMEOUT." 
- " Specified as a string such as: 1h30m, 60m, etc.", "None" ), - TestParam( "stdout", "If set, the stdout will be placed in the named file, in the TestCase directory", None ), - TestParam( "stderr", "If set, the stderr will be placed in the named file, in the TestCase directory", None ), - TestParam( "expectedResult", "'PASS' or 'FAIL'", "'PASS'" ), - TestParam( "delayed", "Whether execution of the step will be delayed", "False" ), - TestParam( "minor", "Whether failure of this step is minor issue", "False" ), + " Specified as a string such as: 1h30m, 60m, etc.", "None"), + TestParam("stdout", "If set, the stdout will be placed in the named file, in the TestCase directory", None), + TestParam("stderr", "If set, the stderr will be placed in the named file, in the TestCase directory", None), + TestParam("expectedResult", "'PASS' or 'FAIL'", "'PASS'"), + TestParam("delayed", "Whether execution of the step will be delayed", "False"), + TestParam("minor", "Whether failure of this step is minor issue", "False"), ) commonParams = { "name": - TestParam( "name", "Used to give other params default values.", "The name of the TestCase" ), + TestParam("name", "Used to give other params default values.", "The name of the TestCase"), "deck": - TestParam( "deck", "Name of the input file. Setting deck to False means no deck is used.", ".in" ), + TestParam("deck", "Name of the input file. Setting deck to False means no deck is used.", ".in"), "np": - TestParam( "np", "The number of processors to run on.", 1 ), + TestParam("np", "The number of processors to run on.", 1), "ngpu": - TestParam( "ngpu", "The number of gpus to run on when available.", 0 ), + TestParam("ngpu", "The number of gpus to run on when available.", 0), "check": TestParam( "check", "True or False. 
determines whether the default checksteps will " - "be automatically be added after this step.", "True" ), - "baseline_dir": - TestParam( "baseline_dir", "subdirectory of config.testbaseline_dir where the test " - "baselines are located.", "" ), + "be automatically be added after this step.", "True"), + "test_directory": + TestParam("test_directory", "subdirectory holding the test definitions", ""), + "baseline_directory": + TestParam("baseline_directory", "subdirectory of config.testbaseline_directory where the test " + "baselines are located.", ""), "output_directory": - TestParam( "output_directory", "subdirectory where the test log, params, rin, and " - "timehistory files are located.", "" ), + TestParam("output_directory", "subdirectory where the test log, params, rin, and " + "timehistory files are located.", ""), "rebaseline": TestParam( "rebaseline", "additional files to rebaseline during the rebaseline action." - " rebaseline may be a string or a list of strings." ), + " rebaseline may be a string or a list of strings."), "timehistfile": - TestParam( "timehistfile", "name of the file containing all the" - " timehist curves.", "testmode..ul" ), + TestParam("timehistfile", "name of the file containing all the" + " timehist curves.", "testmode..ul"), "basetimehistfile": - TestParam( "basetimehistfile", "location to the baseline timehistfile", - "//" ), + TestParam("basetimehistfile", "location to the baseline timehistfile", + "//"), "allow_rebaseline": TestParam( "allow_rebaseline", "True if the second file should be re-baselined during a rebaseline action." 
- " False if the second file should not be rebaselined.", "True" ), + " False if the second file should not be rebaselined.", "True"), "testcase_name": - TestParam( "testcase_name", "The name of the testcase" ), + TestParam("testcase_name", "The name of the testcase"), "testcase_out": - TestParam( "testcase_out", "The file where stdout for the testcase is accumulated" ), + TestParam("testcase_out", "The file where stdout for the testcase is accumulated"), } # namespace to place the params. - class Params( object ): + class Params(object): pass - def __init__( self ): + def __init__(self): self.p = TestStepBase.Params() self.extraSteps = [] - def setParams( self, dictionary, paramlist ): + def setParams(self, dictionary, paramlist): """ Given a list of parameters PARAMLIST and a DICTIONARY set the parameters in PARAMLIST that are also in DICTIONARY but do not yet have a value. @@ -155,38 +157,38 @@ def setParams( self, dictionary, paramlist ): """ for p in paramlist: pname = p.name - if getattr( self.p, pname, None ) is None: - setattr( self.p, pname, dictionary.get( pname, None ) ) + if getattr(self.p, pname, None) is None: + setattr(self.p, pname, dictionary.get(pname, None)) - def requireParam( self, param ): + def requireParam(self, param): """ Require that the given parameter is defined and not None. PARAM [in]: The name of the parameter to check. """ - if not hasattr( self.p, param ): - Error( "%s must be given" % param ) - if getattr( self.p, param ) is None: - Error( "%s must not be None" % param ) + if not hasattr(self.p, param): + Error("%s must be given" % param) + if getattr(self.p, param) is None: + Error("%s must not be None" % param) - def insertStep( self, steps ): + def insertStep(self, steps): """ Insert into the list of steps STEPS. STEPS [in/out]: The list of steps to insert into. """ - steps.append( self ) + steps.append(self) - def makeArgs( self ): + def makeArgs(self): """ Return the command line arguments for this step. 
""" - raise Error( "Must implement this" ) + raise Error("Must implement this") - def makeArgsForStatusKey( self ): + def makeArgsForStatusKey(self): return self.makeArgs() - def setStdout( self, dictionary ): + def setStdout(self, dictionary): """ Generate a unique stdout file using DICTIONARY. @@ -197,26 +199,26 @@ def setStdout( self, dictionary ): self.p.stdout = stepname + "." + self.label() + ".out" if self.p.stdout in dictionary: - Log( "Non-unique name for stdout file: %s" % self.p.stdout ) + Log("Non-unique name for stdout file: %s" % self.p.stdout) else: - dictionary[ self.p.stdout ] = 1 + dictionary[self.p.stdout] = 1 - def update( self, dictionary ): + def update(self, dictionary): """ Update parameters using DICTIONARY. All parameters which already have values are not updated. Called by the owning TestCase to pass along it's arguments. DICTIONARY [in]: The dictionary used to update the parameters. """ - raise Error( "Must implement this" ) + raise Error("Must implement this") - def clean( self ): + def clean(self): """ Remove files generated by this test step. """ - self._clean( [] ) + self._clean([]) - def _clean( self, paths, noclean=[] ): + def _clean(self, paths, noclean=[]): """ Delete files/folders in PATHS and self.p.clean as well as stdout and stderr but not in NOCLEAN. Paths to delete can have wildcard characters '*'. @@ -224,21 +226,21 @@ def _clean( self, paths, noclean=[] ): PATHS [in]: Paths to remove, can have wildcard characters. NOCLEAN [in]: Paths to ignore, can not have wildcard characters. 
""" - self._remove( paths, noclean ) + self._remove(paths, noclean) - if hasattr( self.p, "clean" ): + if hasattr(self.p, "clean"): if self.p.clean is not None: - self._remove( self.p.clean, noclean ) - if hasattr( self.p, "stdout" ): + self._remove(self.p.clean, noclean) + if hasattr(self.p, "stdout"): if self.p.stdout is not None: - self._remove( self.p.stdout, noclean ) - self._remove( "%s.*" % self.p.stdout, noclean ) - if hasattr( self.p, "stderr" ): + self._remove(self.p.stdout, noclean) + self._remove("%s.*" % self.p.stdout, noclean) + if hasattr(self.p, "stderr"): if self.p.stderr is not None: - self._remove( self.p.stderr, noclean ) - self._remove( "%s.*" % self.p.stderr, noclean ) + self._remove(self.p.stderr, noclean) + self._remove("%s.*" % self.p.stderr, noclean) - def _remove( self, paths, noclean ): + def _remove(self, paths, noclean): """ Delete files/folders in PATHS but not in NOCLEAN. Paths to delete can have wildcard characters '*'. @@ -246,100 +248,100 @@ def _remove( self, paths, noclean ): PATHS [in]: Paths to remove, can have wildcard characters. NOCLEAN [in]: Paths to ignore, can not have wildcard characters. 
""" - if isinstance( paths, str ): - paths = [ paths ] + if isinstance(paths, str): + paths = [paths] for path in paths: if self.getTestMode(): - Log( "clean: %s" % path ) + Log("clean: %s" % path) else: - delpaths = glob.glob( path ) + delpaths = glob.glob(path) for p in delpaths: if p in noclean: continue try: - if os.path.isdir( p ): - shutil.rmtree( p ) + if os.path.isdir(p): + shutil.rmtree(p) else: - os.remove( p ) + os.remove(p) except OSError as e: - logger.debug( e ) # so that two simultaneous clean operations don't fail + logger.debug(e) # so that two simultaneous clean operations don't fail - def getCheckOption( self ): - return ats.tests.AtsTest.getOptions().get( "checkoption" ) + def getCheckOption(self): + return ats.tests.AtsTest.getOptions().get("checkoption") - def getTestMode( self ): - return ats.tests.AtsTest.getOptions().get( "testmode" ) + def getTestMode(self): + return ats.tests.AtsTest.getOptions().get("testmode") - def isCheck( self ): + def isCheck(self): """ Return True iff this is a check step. """ return False - def isDelayed( self ): + def isDelayed(self): """ Return True iff this step and all substeps should be moved to the end of the test case. """ return self.p.delayed - def isMinor( self ): + def isMinor(self): """ Return True iff failure of this step is a minor issue. """ return self.p.minor - def saveOut( self ): + def saveOut(self): return self.p.stdout - def saveErr( self ): + def saveErr(self): return self.p.stderr - def useMPI( self ): + def useMPI(self): """ Return True iff this step uses MPI. """ return False - def resultPaths( self ): + def resultPaths(self): """ Return a list of paths generated by this step. 
""" return [] - def timelimit( self ): - return getattr( self.p, "timelimit", None ) + def timelimit(self): + return getattr(self.p, "timelimit", None) - def expectedResult( self ): - return getattr( self.p, "expectedResult", "PASS" ) + def expectedResult(self): + return getattr(self.p, "expectedResult", "PASS") - def handleCommonParams( self ): + def handleCommonParams(self): """ Handle all the common parameters. """ - if hasattr( self.p, "np" ): + if hasattr(self.p, "np"): if self.p.np is None: self.p.np = 1 - if hasattr( self.p, "ngpu" ): + if hasattr(self.p, "ngpu"): if self.p.ngpu is None: self.p.ngpu = 0 - if hasattr( self.p, "check" ): + if hasattr(self.p, "check"): if self.p.check is None: self.p.check = True - if hasattr( self.p, "allow_rebaseline" ): + if hasattr(self.p, "allow_rebaseline"): if self.p.allow_rebaseline is None: self.p.allow_rebaseline = True - def executable( self ): + def executable(self): """ Return the path of the executable used to execute this step. """ - raise Error( "Must implement this" ) + raise Error("Must implement this") - def rebaseline( self ): + def rebaseline(self): """ Rebaseline this test step. """ @@ -349,23 +351,23 @@ def rebaseline( self ): ################################################################################ # CheckTestStepBase ################################################################################ -class CheckTestStepBase( TestStepBase ): +class CheckTestStepBase(TestStepBase): """ Base class for check test steps. """ - checkParams = ( TestParam( + checkParams = (TestParam( "enabled", "True or False. determines whether this step is enabled. 
Often times used to turn off automatic check steps", - "True" ), ) + "True"), ) - def isCheck( self ): + def isCheck(self): return True - def handleCommonParams( self ): - TestStepBase.handleCommonParams( self ) + def handleCommonParams(self): + TestStepBase.handleCommonParams(self) - if hasattr( self.p, "enabled" ): + if hasattr(self.p, "enabled"): if self.p.enabled is None: self.p.enabled = True @@ -373,7 +375,7 @@ def handleCommonParams( self ): ################################################################################ # geos ################################################################################ -class geos( TestStepBase ): +class geos(TestStepBase): """ Class for the Geos test step. """ @@ -384,20 +386,20 @@ class geos( TestStepBase ): command = "geosx [-i ] [-r ] [-x ] [-y ] [-z ] [-s ] [-n ] [-o ] [ --suppress-pinned ] " params = TestStepBase.defaultParams + ( - TestStepBase.commonParams[ "name" ], TestStepBase.commonParams[ "deck" ], TestStepBase.commonParams[ "np" ], - TestStepBase.commonParams[ "ngpu" ], TestStepBase.commonParams[ "check" ], - TestStepBase.commonParams[ "baseline_dir" ], TestStepBase.commonParams[ "output_directory" ], - TestParam( "restart_file", "The name of the restart file." ), - TestParam( "x_partitions", "The number of partitions in the x direction." ), - TestParam( "y_partitions", "The number of partitions in the y direction." ), - TestParam( "z_partitions", - "The number of partitions in the z direction." ), TestParam( "schema_level", "The schema level." ), - TestParam( "suppress-pinned", "Option to suppress use of pinned memory for MPI buffers." ), - TestParam( "trace_data_migration", "Trace host-device data migration." 
) ) + TestStepBase.commonParams["name"], TestStepBase.commonParams["deck"], TestStepBase.commonParams["np"], + TestStepBase.commonParams["ngpu"], TestStepBase.commonParams["check"], + TestStepBase.commonParams["test_directory"], TestStepBase.commonParams["baseline_directory"], TestStepBase.commonParams["output_directory"], + TestParam("restart_file", "The name of the restart file."), + TestParam("x_partitions", "The number of partitions in the x direction."), + TestParam("y_partitions", "The number of partitions in the y direction."), + TestParam("z_partitions", + "The number of partitions in the z direction."), TestParam("schema_level", "The schema level."), + TestParam("suppress-pinned", "Option to suppress use of pinned memory for MPI buffers."), + TestParam("trace_data_migration", "Trace host-device data migration.")) - checkstepnames = [ "restartcheck" ] + checkstepnames = ["restartcheck"] - def __init__( self, restartcheck_params=None, curvecheck_params=None, **kw ): + def __init__(self, restartcheck_params=None, curvecheck_params=None, **kw): """ Initializes the parameters of this test step, and creates the appropriate check steps. @@ -406,112 +408,113 @@ def __init__( self, restartcheck_params=None, curvecheck_params=None, **kw ): KEYWORDS [in]: Dictionary that is used to set the parameters of this step and also all check steps. 
""" - TestStepBase.__init__( self ) - self.setParams( kw, self.params ) + TestStepBase.__init__(self) + self.setParams(kw, self.params) checkOption = self.getCheckOption() self.checksteps = [] - if checkOption in [ "all", "curvecheck" ]: + if checkOption in ["all", "curvecheck"]: if curvecheck_params is not None: - self.checksteps.append( curvecheck( curvecheck_params, **kw ) ) + self.checksteps.append(curvecheck(curvecheck_params, **kw)) - if checkOption in [ "all", "restartcheck" ]: + if checkOption in ["all", "restartcheck"]: if restartcheck_params is not None: - self.checksteps.append( restartcheck( restartcheck_params, **kw ) ) + self.checksteps.append(restartcheck(restartcheck_params, **kw)) - def label( self ): + def label(self): return "geos" - def useMPI( self ): + def useMPI(self): return True - def executable( self ): + def executable(self): # python = os.path.join(binDir, "..", "lib", "PYGEOS", "bin", "python3") # pygeosDir = os.path.join(binDir, "..", "..", "src", "pygeos") # return python + " -m mpi4py " + os.path.join( pygeosDir, "reentrantTest.py" ) # return python + " -m mpi4py " + os.path.join( pygeosDir, "test.py" ) # return config.geos_bin_dir - return os.path.join( config.geos_bin_dir, 'geosx' ) + return os.path.join(config.geos_bin_dir, 'geosx') - def update( self, dictionary ): - self.setParams( dictionary, self.params ) + def update(self, dictionary): + self.setParams(dictionary, self.params) - self.requireParam( "deck" ) - self.requireParam( "name" ) - self.requireParam( "baseline_dir" ) - self.requireParam( "output_directory" ) + self.requireParam("deck") + self.requireParam("name") + self.requireParam("baseline_directory") + self.requireParam("output_directory") + self.requireParam("test_directory") self.handleCommonParams() - self.setStdout( dictionary ) + self.setStdout(dictionary) # update all the checksteps if self.p.check: for step in self.checksteps: - step.update( dictionary ) + step.update(dictionary) - def insertStep( self, steps 
): + def insertStep(self, steps): # the step - steps.append( self ) + steps.append(self) # the post conditions if self.p.check: for step in self.checksteps: - step.insertStep( steps ) + step.insertStep(steps) - def makeArgs( self ): + def makeArgs(self): args = [] if self.p.deck: - args += [ "-i", self.p.deck ] + args += ["-i", os.path.join(self.p.test_directory, self.p.deck)] if self.p.restart_file: - args += [ "-r", self.p.restart_file ] + args += ["-r", os.path.abspath(os.path.join(self.p.output_directory, '..', self.p.restart_file))] if self.p.x_partitions: - args += [ "-x", self.p.x_partitions ] + args += ["-x", self.p.x_partitions] if self.p.y_partitions: - args += [ "-y", self.p.y_partitions ] + args += ["-y", self.p.y_partitions] if self.p.z_partitions: - args += [ "-z", self.p.z_partitions ] + args += ["-z", self.p.z_partitions] if self.p.schema_level: - args += [ "-s", self.p.schema_level ] + args += ["-s", self.p.schema_level] if self.p.name: - args += [ "-n", self.p.name ] + args += ["-n", self.p.name] if self.p.output_directory: - args += [ "-o", self.p.output_directory ] + args += ["-o", self.p.output_directory] # if self.p.ngpu == 0: if self.p.ngpu >= 0: - args += [ "--suppress-pinned" ] + args += ["--suppress-pinned"] if self.p.trace_data_migration: - args += [ "--trace-data-migration" ] + args += ["--trace-data-migration"] - return list( map( str, args ) ) + return list(map(str, args)) - def resultPaths( self ): + def resultPaths(self): paths = [] - name = getGeosProblemName( self.p.deck, self.p.name ) - paths += [ os.path.join( self.p.output_directory, "%s_restart_*" ) % name ] - paths += [ os.path.join( self.p.output_directory, "silo*" ) ] - paths += [ os.path.join( self.p.output_directory, "%s_bp_*" % name ) ] + name = getGeosProblemName(self.p.deck, self.p.name) + paths += [os.path.join(self.p.output_directory, "%s_restart_*") % name] + paths += [os.path.join(self.p.output_directory, "silo*")] + paths += [os.path.join(self.p.output_directory, 
"%s_bp_*" % name)] return paths - def clean( self ): - self._clean( self.resultPaths() ) + def clean(self): + self._clean(self.resultPaths()) ################################################################################ # restartcheck ################################################################################ -class restartcheck( CheckTestStepBase ): +class restartcheck(CheckTestStepBase): """ Class for the restart check test step. """ @@ -521,129 +524,127 @@ class restartcheck( CheckTestStepBase ): command = """restartcheck [-r RELATIVE] [-a ABSOLUTE] [-o OUTPUT] [-e EXCLUDE [EXCLUDE ...]] [-w] file_pattern baseline_pattern""" params = TestStepBase.defaultParams + CheckTestStepBase.checkParams + ( - TestStepBase.commonParams[ "deck" ], TestStepBase.commonParams[ "name" ], TestStepBase.commonParams[ "np" ], - TestStepBase.commonParams[ "allow_rebaseline" ], TestStepBase.commonParams[ "baseline_dir" ], - TestStepBase.commonParams[ "output_directory" ], - TestParam( "file_pattern", "Regex pattern to match file written out by geos." ), - TestParam( "baseline_pattern", "Regex pattern to match file to compare against." ), - TestParam( "rtol", - "Relative tolerance, default is 0.0." ), TestParam( "atol", "Absolute tolerance, default is 0.0." ), - TestParam( "exclude", "Regular expressions matching groups to exclude from the check, default is None." ), - TestParam( "warnings_are_errors", "Treat warnings as errors, default is True." ), - TestParam( "suppress_output", "Whether to write output to stdout, default is True." ), - TestParam( "skip_missing", "Whether to skip missing values in target or baseline files, default is False." 
) ) - - def __init__( self, restartcheck_params, **kw ): + TestStepBase.commonParams["deck"], TestStepBase.commonParams["name"], TestStepBase.commonParams["np"], + TestStepBase.commonParams["allow_rebaseline"], TestStepBase.commonParams["baseline_directory"], + TestStepBase.commonParams["output_directory"], + TestParam("file_pattern", "Regex pattern to match file written out by geos."), + TestParam("baseline_pattern", "Regex pattern to match file to compare against."), + TestParam("rtol", + "Relative tolerance, default is 0.0."), TestParam("atol", "Absolute tolerance, default is 0.0."), + TestParam("exclude", "Regular expressions matching groups to exclude from the check, default is None."), + TestParam("warnings_are_errors", "Treat warnings as errors, default is True."), + TestParam("suppress_output", "Whether to write output to stdout, default is True."), + TestParam("skip_missing", "Whether to skip missing values in target or baseline files, default is False.")) + + def __init__(self, restartcheck_params, **kw): """ Set parameters with RESTARTCHECK_PARAMS and then with KEYWORDS. 
""" - CheckTestStepBase.__init__( self ) + CheckTestStepBase.__init__(self) self.p.warnings_are_errors = True if restartcheck_params is not None: - self.setParams( restartcheck_params, self.params ) - self.setParams( kw, self.params ) + self.setParams(restartcheck_params, self.params) + self.setParams(kw, self.params) - def label( self ): + def label(self): return "restartcheck" - def useMPI( self ): + def useMPI(self): return True - def executable( self ): + def executable(self): if self.getTestMode(): return "python -m mpi4py" else: return sys.executable + " -m mpi4py" - def update( self, dictionary ): - self.setParams( dictionary, self.params ) + def update(self, dictionary): + self.setParams(dictionary, self.params) self.handleCommonParams() - self.requireParam( "deck" ) - self.requireParam( "baseline_dir" ) - self.requireParam( "output_directory" ) + self.requireParam("deck") + self.requireParam("baseline_directory") + self.requireParam("output_directory") if self.p.file_pattern is None: - self.p.file_pattern = getGeosProblemName( self.p.deck, self.p.name ) + r"_restart_[0-9]+\.root" + self.p.file_pattern = getGeosProblemName(self.p.deck, self.p.name) + r"_restart_[0-9]+\.root" if self.p.baseline_pattern is None: self.p.baseline_pattern = self.p.file_pattern - self.restart_file_regex = os.path.join( self.p.output_directory, self.p.file_pattern ) - self.restart_baseline_regex = os.path.join( self.p.baseline_dir, self.p.baseline_pattern ) + self.restart_file_regex = os.path.join(self.p.output_directory, self.p.file_pattern) + self.restart_baseline_regex = os.path.join(self.p.baseline_directory, self.p.baseline_pattern) if self.p.allow_rebaseline is None: self.p.allow_rebaseline = True - def insertStep( self, steps ): + def insertStep(self, steps): if config.restartcheck_enabled and self.p.enabled: - steps.append( self ) + steps.append(self) - def makeArgs( self ): - cur_dir = os.path.dirname( os.path.realpath( __file__ ) ) - script_location = os.path.join( 
cur_dir, "helpers", "restart_check.py" ) - args = [ script_location ] + def makeArgs(self): + cur_dir = os.path.dirname(os.path.realpath(__file__)) + script_location = os.path.join(cur_dir, "helpers", "restart_check.py") + args = [script_location] if self.p.atol is not None: - args += [ "-a", self.p.atol ] + args += ["-a", self.p.atol] if self.p.rtol is not None: - args += [ "-r", self.p.rtol ] + args += ["-r", self.p.rtol] if self.p.warnings_are_errors: - args += [ "-w" ] + args += ["-w"] if self.p.suppress_output: - args += [ "-s" ] - if ( self.p.skip_missing or config.restart_skip_missing ): - args += [ "-m" ] + args += ["-s"] + if (self.p.skip_missing or config.restart_skip_missing): + args += ["-m"] exclude_values = config.restart_exclude_pattern if self.p.exclude is not None: - exclude_values.extend( self.p.exclude ) + exclude_values.extend(self.p.exclude) for v in exclude_values: - args += [ "-e", v ] + args += ["-e", v] - args += [ self.restart_file_regex, self.restart_baseline_regex ] - return list( map( str, args ) ) + args += [self.restart_file_regex, self.restart_baseline_regex] + return list(map(str, args)) - def rebaseline( self ): + def rebaseline(self): if not self.p.allow_rebaseline: - Log( "Rebaseline not allowed for restartcheck of %s." % self.p.name ) + Log("Rebaseline not allowed for restartcheck of %s." % self.p.name) return - root_file_path = findMaxMatchingFile( self.restart_file_regex ) + root_file_path = findMaxMatchingFile(self.restart_file_regex) if root_file_path is None: - raise IOError( "File not found matching the pattern %s in directory %s." % - ( self.restart_file_regex, os.getcwd() ) ) + raise IOError("File not found matching the pattern %s in directory %s." 
% + (self.restart_file_regex, os.getcwd())) - baseline_dir = os.path.dirname( self.restart_baseline_regex ) - root_baseline_path = findMaxMatchingFile( self.restart_baseline_regex ) + baseline_directory = os.path.dirname(self.restart_baseline_regex) + root_baseline_path = findMaxMatchingFile(self.restart_baseline_regex) if root_baseline_path is not None: # Delete the baseline root file. - os.remove( root_baseline_path ) + os.remove(root_baseline_path) # Delete the directory holding the baseline data files. - data_dir_path = os.path.splitext( root_baseline_path )[ 0 ] - shutil.rmtree( data_dir_path ) + data_dir_path = os.path.splitext(root_baseline_path)[0] + shutil.rmtree(data_dir_path) else: - os.makedirs( baseline_dir, exist_ok=True ) + os.makedirs(baseline_directory, exist_ok=True) # Copy the root file into the baseline directory. - shutil.copy2( root_file_path, os.path.join( baseline_dir, os.path.basename( root_file_path ) ) ) + shutil.copy2(root_file_path, os.path.join(baseline_directory, os.path.basename(root_file_path))) # Copy the directory holding the data files into the baseline directory. 
- data_dir_path = os.path.splitext( root_file_path )[ 0 ] - shutil.copytree( data_dir_path, os.path.join( baseline_dir, os.path.basename( data_dir_path ) ) ) + data_dir_path = os.path.splitext(root_file_path)[0] + shutil.copytree(data_dir_path, os.path.join(baseline_directory, os.path.basename(data_dir_path))) - def resultPaths( self ): - return [ - os.path.join( self.p.output_directory, "%s.restartcheck" % os.path.splitext( self.p.file_pattern )[ 0 ] ) - ] + def resultPaths(self): + return [os.path.join(self.p.output_directory, "%s.restartcheck" % os.path.splitext(self.p.file_pattern)[0])] - def clean( self ): - self._clean( self.resultPaths() ) + def clean(self): + self._clean(self.resultPaths()) ################################################################################ # curvecheck ################################################################################ -class curvecheck( CheckTestStepBase ): +class curvecheck(CheckTestStepBase): """ Class for the curve check test step. """ @@ -653,186 +654,186 @@ class curvecheck( CheckTestStepBase ): command = """curve_check.py [-h] [-c CURVE [CURVE ...]] [-t TOLERANCE] [-w] [-o OUTPUT] [-n N_COLUMN] [-u {milliseconds,seconds,minutes,hours,days,years}] filename baseline""" params = TestStepBase.defaultParams + CheckTestStepBase.checkParams + ( - TestStepBase.commonParams[ "deck" ], TestStepBase.commonParams[ "name" ], TestStepBase.commonParams[ "np" ], - TestStepBase.commonParams[ "allow_rebaseline" ], TestStepBase.commonParams[ "baseline_dir" ], - TestStepBase.commonParams[ "output_directory" ], - TestParam( "filename", "Name of the target curve file written by GEOS." ), - TestParam( "curves", "A list of parameter, setname value pairs." 
), + TestStepBase.commonParams["deck"], TestStepBase.commonParams["name"], TestStepBase.commonParams["np"], + TestStepBase.commonParams["allow_rebaseline"], TestStepBase.commonParams["baseline_directory"], + TestStepBase.commonParams["output_directory"], + TestParam("filename", "Name of the target curve file written by GEOS."), + TestParam("curves", "A list of parameter, setname value pairs."), TestParam( "tolerance", "Curve check tolerance (||x-y||/N), can be specified as a single value or a list of floats corresponding to the curves." - ), TestParam( "warnings_are_errors", "Treat warnings as errors, default is True." ), - TestParam( "script_instructions", "A list of (path, function, value, setname) entries" ), - TestParam( "time_units", "Time units to use for plots." ) ) + ), TestParam("warnings_are_errors", "Treat warnings as errors, default is True."), + TestParam("script_instructions", "A list of (path, function, value, setname) entries"), + TestParam("time_units", "Time units to use for plots.")) - def __init__( self, curvecheck_params, **kw ): + def __init__(self, curvecheck_params, **kw): """ Set parameters with CURVECHECK_PARAMS and then with KEYWORDS. 
""" - CheckTestStepBase.__init__( self ) + CheckTestStepBase.__init__(self) self.p.warnings_are_errors = True if curvecheck_params is not None: c = curvecheck_params.copy() - Nc = len( c.get( 'curves', [] ) ) + Nc = len(c.get('curves', [])) # Note: ats seems to store list/tuple parameters incorrectly # Convert these to strings - for k in [ 'curves', 'script_instructions' ]: + for k in ['curves', 'script_instructions']: if k in c: - if isinstance( c[ k ], ( list, tuple ) ): - c[ k ] = ';'.join( [ ','.join( c ) for c in c[ k ] ] ) + if isinstance(c[k], (list, tuple)): + c[k] = ';'.join([','.join(c) for c in c[k]]) # Check whether tolerance was specified as a single float, list # and then convert into a comma-delimited string - tol = c.get( 'tolerance', 0.0 ) - if isinstance( tol, ( float, int ) ): - tol = [ tol ] * Nc - c[ 'tolerance' ] = ','.join( [ str( x ) for x in tol ] ) + tol = c.get('tolerance', 0.0) + if isinstance(tol, (float, int)): + tol = [tol] * Nc + c['tolerance'] = ','.join([str(x) for x in tol]) - self.setParams( c, self.params ) - self.setParams( kw, self.params ) + self.setParams(c, self.params) + self.setParams(kw, self.params) - def label( self ): + def label(self): return "curvecheck" - def useMPI( self ): + def useMPI(self): return True - def executable( self ): + def executable(self): if self.getTestMode(): return "python" else: return sys.executable - def update( self, dictionary ): - self.setParams( dictionary, self.params ) + def update(self, dictionary): + self.setParams(dictionary, self.params) self.handleCommonParams() - self.requireParam( "deck" ) - self.requireParam( "baseline_dir" ) - self.requireParam( "output_directory" ) + self.requireParam("deck") + self.requireParam("baseline_directory") + self.requireParam("output_directory") - self.baseline_file = os.path.join( self.p.baseline_dir, self.p.filename ) - self.target_file = os.path.join( self.p.output_directory, self.p.filename ) - self.figure_root = os.path.join( 
self.p.output_directory, 'curve_check' ) + self.baseline_file = os.path.join(self.p.baseline_directory, self.p.filename) + self.target_file = os.path.join(self.p.output_directory, self.p.filename) + self.figure_root = os.path.join(self.p.output_directory, 'curve_check') if self.p.allow_rebaseline is None: self.p.allow_rebaseline = True - def insertStep( self, steps ): + def insertStep(self, steps): if config.restartcheck_enabled and self.p.enabled: - steps.append( self ) + steps.append(self) - def makeArgs( self ): - cur_dir = os.path.dirname( os.path.realpath( __file__ ) ) - script_location = os.path.join( cur_dir, "helpers", "curve_check.py" ) - args = [ script_location ] + def makeArgs(self): + cur_dir = os.path.dirname(os.path.realpath(__file__)) + script_location = os.path.join(cur_dir, "helpers", "curve_check.py") + args = [script_location] if self.p.curves is not None: - for c in self.p.curves.split( ';' ): - args += [ "-c" ] - args += c.split( ',' ) + for c in self.p.curves.split(';'): + args += ["-c"] + args += c.split(',') if self.p.tolerance is not None: - for t in self.p.tolerance.split( ',' ): - args += [ "-t", t ] + for t in self.p.tolerance.split(','): + args += ["-t", t] if self.p.time_units is not None: - args += [ "-u", self.p.time_units ] + args += ["-u", self.p.time_units] if self.p.script_instructions is not None: - for c in self.p.script_instructions.split( ';' ): - args += [ "-s" ] - args += c.split( ',' ) + for c in self.p.script_instructions.split(';'): + args += ["-s"] + args += c.split(',') if self.p.warnings_are_errors: - args += [ "-w" ] + args += ["-w"] - args += [ '-o', self.figure_root ] - args += [ self.target_file, self.baseline_file ] - return list( map( str, args ) ) + args += ['-o', self.figure_root] + args += [self.target_file, self.baseline_file] + return list(map(str, args)) - def rebaseline( self ): + def rebaseline(self): if not self.p.allow_rebaseline: - Log( "Rebaseline not allowed for curvecheck of %s." 
% self.p.name ) + Log("Rebaseline not allowed for curvecheck of %s." % self.p.name) return - baseline_dir = os.path.split( self.baseline_file )[ 0 ] - os.makedirs( baseline_dir, exist_ok=True ) - shutil.copyfile( self.target_file, self.baseline_file ) + baseline_directory = os.path.split(self.baseline_file)[0] + os.makedirs(baseline_directory, exist_ok=True) + shutil.copyfile(self.target_file, self.baseline_file) - def resultPaths( self ): - figure_pattern = os.path.join( self.figure_root, '*.png' ) - figure_list = sorted( glob.glob( figure_pattern ) ) - return [ self.target_file ] + figure_list + def resultPaths(self): + figure_pattern = os.path.join(self.figure_root, '*.png') + figure_list = sorted(glob.glob(figure_pattern)) + return [self.target_file] + figure_list - def clean( self ): - self._clean( self.resultPaths() ) + def clean(self): + self._clean(self.resultPaths()) -def infoTestStepParams( params, maxwidth=None ): +def infoTestStepParams(params, maxwidth=None): if maxwidth is None: - maxwidth = max( 10, max( [ len( p.name ) for p in params ] ) ) + maxwidth = max(10, max([len(p.name) for p in params])) for p in params: paramdoc = p.doc if p.default is not None: - paramdoc += " (default = %s)" % ( p.default ) - paramdoc = textwrap.wrap( paramdoc, width=100 - maxwidth ) - logger.debug( " %*s:" % ( maxwidth, p.name ), paramdoc[ 0 ].strip() ) - for line in paramdoc[ 1: ]: - logger.debug( " %*s %s" % ( maxwidth, "", line.strip() ) ) + paramdoc += " (default = %s)" % (p.default) + paramdoc = textwrap.wrap(paramdoc, width=100 - maxwidth) + logger.debug(" %*s:" % (maxwidth, p.name), paramdoc[0].strip()) + for line in paramdoc[1:]: + logger.debug(" %*s %s" % (maxwidth, "", line.strip())) -def infoTestStep( stepname ): - topic = common_utilities.InfoTopic( stepname ) +def infoTestStep(stepname): + topic = common_utilities.InfoTopic(stepname) topic.startBanner() - logger.debug( f"TestStep: {stepname}" ) - stepclass = globals()[ stepname ] - if not hasattr( 
stepclass, "doc" ): + logger.debug(f"TestStep: {stepname}") + stepclass = globals()[stepname] + if not hasattr(stepclass, "doc"): return - logger.debug( "Description:" ) - doc = textwrap.dedent( stepclass.doc ) - doc = textwrap.wrap( doc, width=100 ) + logger.debug("Description:") + doc = textwrap.dedent(stepclass.doc) + doc = textwrap.wrap(doc, width=100) for line in doc: - logger.debug( " ", line.strip() ) + logger.debug(" ", line.strip()) - logger.debug( "Command:" ) - doc = textwrap.dedent( stepclass.command ) - doc = textwrap.wrap( doc, width=100 ) - logger.debug( f" {doc[0].strip()}" ) - for line in doc[ 1: ]: - logger.debug( f'\\\n {" " * len(stepname)} {line}' ) + logger.debug("Command:") + doc = textwrap.dedent(stepclass.command) + doc = textwrap.wrap(doc, width=100) + logger.debug(f" {doc[0].strip()}") + for line in doc[1:]: + logger.debug(f'\\\n {" " * len(stepname)} {line}') # compute max param width: - allparams = [ p.name for p in stepclass.params ] - if hasattr( stepclass, "checkstepnames" ): + allparams = [p.name for p in stepclass.params] + if hasattr(stepclass, "checkstepnames"): for checkstep in stepclass.checkstepnames: - checkclass = globals()[ checkstep ] - if not hasattr( checkclass, "doc" ): + checkclass = globals()[checkstep] + if not hasattr(checkclass, "doc"): continue - allparams.extend( [ p.name for p in checkclass.params ] ) - maxwidth = max( 10, max( [ len( p ) for p in allparams ] ) ) + allparams.extend([p.name for p in checkclass.params]) + maxwidth = max(10, max([len(p) for p in allparams])) - logger.debug( "Parameters:" ) - infoTestStepParams( stepclass.params, maxwidth ) + logger.debug("Parameters:") + infoTestStepParams(stepclass.params, maxwidth) - paramset = set( [ p.name for p in stepclass.params ] ) + paramset = set([p.name for p in stepclass.params]) - if hasattr( stepclass, "checkstepnames" ): + if hasattr(stepclass, "checkstepnames"): for checkstep in stepclass.checkstepnames: - logger.debug( f"CheckStep: {checkstep}" ) + 
logger.debug(f"CheckStep: {checkstep}") checkparams = [] - checkclass = globals()[ checkstep ] - if not hasattr( checkclass, "doc" ): + checkclass = globals()[checkstep] + if not hasattr(checkclass, "doc"): continue for p in checkclass.params: if p.name not in paramset: - checkparams.append( p ) + checkparams.append(p) - infoTestStepParams( checkparams, maxwidth ) + infoTestStepParams(checkparams, maxwidth) topic.endBanner() -def infoTestSteps( *args ): +def infoTestSteps(*args): """This function is used to print documentation about the teststeps to stdout""" # get the list of step classes @@ -840,39 +841,39 @@ def infoTestSteps( *args ): checkstepnames = [] for k, v in globals().items(): - if not isinstance( v, type ): + if not isinstance(v, type): continue - if v in ( CheckTestStepBase, TestStepBase ): + if v in (CheckTestStepBase, TestStepBase): continue try: - if issubclass( v, CheckTestStepBase ): - checkstepnames.append( k ) - elif issubclass( v, TestStepBase ): - steps.append( k ) + if issubclass(v, CheckTestStepBase): + checkstepnames.append(k) + elif issubclass(v, TestStepBase): + steps.append(k) except TypeError as e: - logger.debug( e ) + logger.debug(e) - steps = sorted( steps ) - checkstepnames = sorted( checkstepnames ) + steps = sorted(steps) + checkstepnames = sorted(checkstepnames) steps = steps + checkstepnames def all(): for s in steps: - infoTestStep( s ) + infoTestStep(s) - topic = common_utilities.InfoTopic( "teststep" ) - topic.addTopic( "all", "full info on all the teststeps", all ) + topic = common_utilities.InfoTopic("teststep") + topic.addTopic("all", "full info on all the teststeps", all) for s in steps: - stepclass = globals()[ s ] - doc = getattr( stepclass, "doc", None ) - topic.addTopic( s, textwrap.dedent( doc ).strip(), lambda ss=s: infoTestStep( ss ) ) + stepclass = globals()[s] + doc = getattr(stepclass, "doc", None) + topic.addTopic(s, textwrap.dedent(doc).strip(), lambda ss=s: infoTestStep(ss)) - topic.process( args ) + 
topic.process(args) # Register test step definitions -ats.manager.define( geos=geos ) -ats.manager.define( restartcheck=restartcheck ) -ats.manager.define( config=config ) +ats.manager.define(geos=geos) +ats.manager.define(restartcheck=restartcheck) +ats.manager.define(config=config) From 2d7cc12163f815d8d67d70fe0eec0f04cf2d2aa9 Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Mon, 22 Jan 2024 15:54:44 -0800 Subject: [PATCH 02/14] Removing unused code, fixing reports --- .../geos_ats/configuration_record.py | 9 - geos_ats_package/geos_ats/main.py | 52 +- geos_ats_package/geos_ats/reporting.py | 1102 +++-------------- geos_ats_package/geos_ats/test_case.py | 142 +-- geos_ats_package/geos_ats/test_steps.py | 10 +- geos_ats_package/setup.cfg | 1 + 6 files changed, 180 insertions(+), 1136 deletions(-) diff --git a/geos_ats_package/geos_ats/configuration_record.py b/geos_ats_package/geos_ats/configuration_record.py index 38b2f18..8d251c5 100644 --- a/geos_ats_package/geos_ats/configuration_record.py +++ b/geos_ats_package/geos_ats/configuration_record.py @@ -193,11 +193,6 @@ def initializeConfig(configFile, configOverride, options): config.add("report_doc_remake", bool, False, "Remake test documentation, even if it already exists (used with html reports)") - config.add("report_text", bool, True, "True if you want text results to be generated with the report action") - config.add("report_text_file", str, "test_results.txt", "Location to write the text report") - config.add("report_text_echo", bool, True, "If True, echo the report to stdout") - config.add("report_wait", bool, False, "Wait until all tests are complete before reporting") - config.add("report_ini", bool, True, "True if you want ini results to be generated with the report action") config.add("report_ini_file", str, "test_results.ini", "Location to write the ini report") @@ -212,10 +207,6 @@ def initializeConfig(configFile, configOverride, options): config.add("checkmessages_never_ignore_regexp", 
type([]), ["not yet implemented"], "Regular expression to not ignore in all checkmessages steps.") - config.add("report_timing", bool, False, "True if you want timing file to be generated with the report action") - config.add("report_timing_overwrite", bool, False, - "True if you want timing file to overwrite existing timing file rather than augment it") - # timing and priority config.add("priority", str, "equal", "Method of prioritization of tests: [\"equal\", \"processors\",\"timing\"]") config.add("timing_file", str, "timing.txt", "Location of timing file") diff --git a/geos_ats_package/geos_ats/main.py b/geos_ats_package/geos_ats/main.py index 8595327..b22af5b 100644 --- a/geos_ats_package/geos_ats/main.py +++ b/geos_ats_package/geos_ats/main.py @@ -208,68 +208,29 @@ def info(args): def report(manager): """The report action""" - from geos_ats import (test_case, reporting, configuration_record) - - testcases = test_case.TESTS.values() - - if configuration_record.config.report_wait: - reporter = reporting.ReportWait(testcases) - reporter.report(sys.stdout) - - if configuration_record.config.report_text: - reporter = reporting.ReportText(testcases) - with open(configuration_record.config.report_text_file, "w") as filep: - reporter.report(filep) - if configuration_record.config.report_text_echo: - with open(configuration_record.config.report_text_file, "r") as filep: - sys.stdout.write(filep.read()) + from geos_ats import (reporting, configuration_record) if configuration_record.config.report_html: - reporter = reporting.ReportHTML(testcases) + reporter = reporting.ReportHTML(manager.testlist) reporter.report() if configuration_record.config.report_ini: - reporter = reporting.ReportIni(testcases) + reporter = reporting.ReportIni(manager.testlist) with open(configuration_record.config.report_ini_file, "w") as filep: reporter.report(filep) - if configuration_record.config.report_timing: - reporter = reporting.ReportTiming(testcases) - if not 
configuration_record.config.report_timing_overwrite: - try: - with open(configuration_record.config.timing_file, "r") as filep: - reporter.getOldTiming(filep) - except IOError as e: - logger.debug(e) - with open(configuration_record.config.timing_file, "w") as filep: - reporter.report(filep) - def summary(manager, alog, short=False): """Periodic summary and final summary""" - from geos_ats import (reporting, configuration_record, test_case) + from geos_ats import (reporting, configuration_record) if len(manager.testlist) == 0: return - if hasattr(manager.machine, "getNumberOfProcessors"): - totalNumberOfProcessors = getattr(manager.machine, "getNumberOfProcessors", None)() - else: - totalNumberOfProcessors = 1 - reporter = reporting.ReportTextPeriodic(manager.testlist) - reporter.report(geos_atsStartTime, totalNumberOfProcessors) - if configuration_record.config.report_html and configuration_record.config.report_html_periodic: - testcases = test_case.TESTS.values() - reporter = reporting.ReportHTML(testcases) + reporter = reporting.ReportHTML(manager.testlist) reporter.report(refresh=30) - if configuration_record.config.report_text: - testcases = test_case.TESTS.values() - reporter = reporting.ReportText(testcases) - with open(configuration_record.config.report_text_file, "w") as filep: - reporter.report(filep) - def append_geos_ats_summary(manager): initial_summary = manager.summary @@ -346,7 +307,6 @@ def main(): # Check the report location if options.logs: config.report_html_file = os.path.join(options.logs, 'test_results.html') - config.report_text_file = os.path.join(options.logs, 'test_results.txt') config.report_ini_file = os.path.join(options.logs, 'test_results.ini') build_ats_arguments(options, originalargv, config) @@ -424,7 +384,7 @@ def main(): # clean if options.action == "veryclean": common_utilities.removeLogDirectories(os.getcwd()) - files = [config.report_html_file, config.report_ini_file, config.report_text_file] + files = 
[config.report_html_file, config.report_ini_file] for f in files: if os.path.exists(f): os.remove(f) diff --git a/geos_ats_package/geos_ats/reporting.py b/geos_ats_package/geos_ats/reporting.py index 5230850..633f5dd 100644 --- a/geos_ats_package/geos_ats/reporting.py +++ b/geos_ats_package/geos_ats/reporting.py @@ -1,119 +1,88 @@ import os import socket -import subprocess import time -import re from geos_ats.configuration_record import config -import sys -import ats # type: ignore[import] from configparser import ConfigParser +from tabulate import tabulate +import glob import logging +from ats.times import hms +from ats import (PASSED, FAILED, TIMEDOUT, EXPECTED, BATCHED, FILTERED, + SKIPPED, CREATED, RUNNING, HALTED, LSFERROR) # Get the active logger instance logger = logging.getLogger('geos_ats') -# The following are ALEATS test status values. -# The order is important for the ReportGroup: lower values take precendence -FAILRUN = 0 -FAILCHECK = 1 -FAILCHECKMINOR = 2 -TIMEOUT = 3 -INPROGRESS = 4 -NOTRUN = 5 -FILTERED = 6 -RUNNING = 7 -SKIP = 8 -BATCH = 9 -FAILRUNOPTIONAL = 10 -NOTBUILT = 11 -PASS = 12 -EXPECTEDFAIL = 13 -UNEXPECTEDPASS = 14 +# Status value in priority order +STATUS = (EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED, RUNNING, + PASSED, TIMEDOUT, HALTED, LSFERROR, FAILED) -status_map = { - 'FAILRUN': FAILRUN, - 'FAILCHECK': FAILCHECK, - 'FAILCHECKMINOR': FAILCHECKMINOR, - 'TIMEOUT': TIMEOUT, - 'INPROGRESS': INPROGRESS, - 'NOTRUN': NOTRUN, - 'FILTERED': FILTERED, - 'RUNNING': RUNNING, - 'SKIP': SKIP, - 'BATCH': BATCH, - 'FAILRUNOPTIONAL': FAILRUNOPTIONAL, - 'NOTBUILT': NOTBUILT, - 'PASS': PASS, - 'EXPECTEDFAIL': EXPECTEDFAIL, - 'UNEXPECTEDPASS': UNEXPECTEDPASS -} +COLORS = {} +COLORS[EXPECTED.name] = "black" +COLORS[CREATED.name] = "black" +COLORS[BATCHED.name] = "black" +COLORS[FILTERED.name] = "black" +COLORS[SKIPPED.name] = "orange" +COLORS[RUNNING.name] = "blue" +COLORS[PASSED.name] = "green" +COLORS[TIMEDOUT.name] = "red" +COLORS[HALTED.name] = 
"brown" +COLORS[LSFERROR.name] = "brown" +COLORS[FAILED.name] = "red" -# A tuple of test status values. -STATUS = (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, FILTERED, RUNNING, - INPROGRESS, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT) -STATUS_NOTDONE = (NOTRUN, RUNNING, INPROGRESS, BATCH) +def max_status(sa, sb): + Ia = STATUS.index(sa) + Ib = STATUS.index(sb) + return STATUS[max(Ia, Ib)] class ReportBase(object): - """Base class for reporting. The constructor takes in a sequence - of testcases (of type test_case), and from each testcase, a - ReportTestCase object is created.""" + """Base class for reporting""" + + def __init__(self, test_steps): + self.test_results = {} + self.test_groups = {} + self.status_lists = {} + + for t in test_steps: + # Parse the test step name + step_name = t.name[t.name.find('(')+1:t.name.rfind('_')] + test_name = step_name[:step_name.rfind('_')] + test_id = t.group.number + group_name = test_name[:test_name.rfind('_')] + + # Save data + if test_name not in self.test_results: + self.test_results[test_name] = {'steps': {}, 'status': EXPECTED, 'id': test_id, 'elapsed': 0.0, 'current_step': ' ', 'resources': t.np} + self.test_results[test_name]['steps'][t.name] = {'status': t.status, 'log': t.outname, 'output': t.step_outputs, 'number': t.groupSerialNumber} + + # Check elapsed time + elapsed = 0.0 + if hasattr(t, 'endTime'): + elapsed = t.endTime - t.startTime + self.test_results[test_name]['steps'][t.name]['elapsed'] = elapsed + self.test_results[test_name]['elapsed'] += elapsed + + # Check the status and the latest step + self.test_results[test_name]['status'] = max_status(t.status, self.test_results[test_name]['status']) + if t.status not in (EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED): + self.test_results[test_name]['current_step'] = t.name + + if group_name not in self.test_groups: + self.test_groups[group_name] = {'tests': [], 'status': EXPECTED} + 
self.test_groups[group_name]['tests'].append(test_name) + self.test_groups[group_name]['status'] = max_status(t.status, self.test_groups[group_name]['status']) + + # Collect status names + for s in STATUS: + self.status_lists[s.name] = [k for k, v in self.test_results.items() if v['status'] == s] - def __init__(self, testcases): - pass - - -class ReportTiming(ReportBase): - """Reporting class that is used for outputting test timings""" - - def __init__(self, testcases): - self.reportcases = [ReportTestCase(t) for t in testcases] - self.timings = {} - - def getOldTiming(self, fp): - for line in fp: - if not line.startswith('#'): - tokens = line.split() - self.timings[tokens[0]] = int(tokens[1]) - - def report(self, fp): - for testcase in self.reportcases: - if testcase.status in [PASS, TIMEOUT]: - self.timings[testcase.testcase.name] = int(testcase.testcase.status.totalTime()) - output = "" - for key in sorted(self.timings): - output += "%s %d\n" % (key, self.timings[key]) - fp.writelines(output) + self.html_filename = config.report_html_file class ReportIni(ReportBase): - """Minimal reporting class that is used for bits status emails""" - - def __init__(self, testcases): - self.reportcases = [ReportTestCase(t) for t in testcases] - - # A dictionary where the key is a status, and the value is a sequence of ReportTestCases - self.reportcaseResults = {} - for status in STATUS: - self.reportcaseResults[status] = [t for t in self.reportcases if t.status == status] - - self.displayName = {} - self.displayName[FAILRUN] = "FAILRUN" - self.displayName[FAILRUNOPTIONAL] = "FAILRUNOPTIONAL" - self.displayName[FAILCHECK] = "FAILCHECK" - self.displayName[FAILCHECKMINOR] = "FAILCHECKMINOR" - self.displayName[TIMEOUT] = "TIMEOUT" - self.displayName[NOTRUN] = "NOTRUN" - self.displayName[INPROGRESS] = "INPROGRESS" - self.displayName[FILTERED] = "FILTERED" - self.displayName[RUNNING] = "RUNNING" - self.displayName[PASS] = "PASSED" - self.displayName[SKIP] = "SKIPPED" - 
self.displayName[BATCH] = "BATCHED" - self.displayName[NOTBUILT] = "NOTBUILT" - self.displayName[EXPECTEDFAIL] = "EXPECTEDFAIL" - self.displayName[UNEXPECTEDPASS] = "UNEXPECTEDPASS" + """Minimal reporting class""" def report(self, fp): configParser = ConfigParser() @@ -140,537 +109,23 @@ def report(self, fp): configParser.set("Info", "Extra Notations", extraNotations) configParser.add_section("Results") - configParser.add_section("Custodians") - configParser.add_section("Documentation") - undocumentedTests = [] - for status in STATUS: - testNames = [] - for reportcaseResult in self.reportcaseResults[status]: - testName = reportcaseResult.testcase.name - testNames.append(testName) - - owner = getowner(testName, reportcaseResult.testcase) - if owner is not None: - configParser.set("Custodians", testName, owner) + for k, v in self.status_lists.items(): + configParser.set("Results", k, ";".join(sorted(v))) - if config.report_doc_link: - linkToDocumentation = os.path.join(config.report_doc_dir, testName, testName + ".html") - if os.path.exists(linkToDocumentation): - configParser.set("Documentation", testName, linkToDocumentation) - else: - if not reportcaseResult.testcase.nodoc: - undocumentedTests.append(testName) - linkToDocumentation = getowner(testName, reportcaseResult.testcase) - testNames = sorted(testNames) - configParser.set("Results", self.displayName[status], ";".join(testNames)) - undocumentedTests = sorted(undocumentedTests) - configParser.set("Documentation", "undocumented", ";".join(undocumentedTests)) configParser.write(fp) -class ReportText(ReportBase): - - def __init__(self, testcases): - - ReportBase.__init__(self, testcases) - - self.reportcases = [ReportTestCase(t) for t in testcases] - - # A dictionary where the key is a status, and the value is a sequence of ReportTestCases - self.reportcaseResults = {} - for status in STATUS: - self.reportcaseResults[status] = [t for t in self.reportcases if t.status == status] - - self.displayName = {} - 
self.displayName[FAILRUN] = "FAIL RUN" - self.displayName[FAILRUNOPTIONAL] = "FAIL RUN (OPTIONAL STEP)" - self.displayName[FAILCHECK] = "FAIL CHECK" - self.displayName[FAILCHECKMINOR] = "FAIL CHECK (MINOR)" - self.displayName[TIMEOUT] = "TIMEOUT" - self.displayName[NOTRUN] = "NOT RUN" - self.displayName[INPROGRESS] = "INPROGRESS" - self.displayName[FILTERED] = "FILTERED" - self.displayName[RUNNING] = "RUNNING" - self.displayName[PASS] = "PASSED" - self.displayName[SKIP] = "SKIPPED" - self.displayName[BATCH] = "BATCHED" - self.displayName[NOTBUILT] = "NOT BUILT" - self.displayName[EXPECTEDFAIL] = "EXPECTEDFAIL" - self.displayName[UNEXPECTEDPASS] = "UNEXPECTEDPASS" - - def report(self, fp): - """Write out the text report to the give file pointer""" - self.writeSummary(fp, (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, - INPROGRESS, FILTERED, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT)) - self.writeLongest(fp, 5) - self.writeDetails(fp, (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, FILTERED)) - - def writeSummary(self, fp, statuses=STATUS): - """The summary groups each TestCase by its status.""" - fp.write("=" * 80) - - from geos_ats import common_utilities - for status in statuses: - - tests = self.reportcaseResults[status] - num = len(tests) - fp.write(f"\n {self.displayName[status]} : {num}") - if num > 0: - testlist = [] - for test in tests: - testname = test.testcase.name - retries = getattr(test.testcase.atsGroup, "retries", 0) - if retries > 0: - testname += '[retry:%d]' % retries - testlist.append(testname) - fp.write(f' ( {" ".join( testlist )} ) ') - - def writeDetails(self, - fp, - statuses=(FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, INPROGRESS), - columns=("Status", "TestCase", "Elapsed", "Resources", "TestStep", "OutFile")): - """This function provides more information about each of the test cases""" - - from geos_ats import common_utilities - - table = 
common_utilities.TextTable(len(columns)) - table.setHeader(*columns) - table.rowbreakstyle = "-" - printTable = False - - for status in statuses: - tests = self.reportcaseResults[status] - - if len(tests) == 0: - continue - - printTable = True - for test in tests: - testcase = test.testcase - label = "" - pathstr = "" - if test.laststep: - paths = testcase.resultPaths(test.laststep) - label = test.laststep.label() - pathstr = " ".join([os.path.relpath(x) for x in paths]) - - row = [] - for col in columns: - if col == "Status": - statusDisplay = self.displayName[test.status] - retries = getattr(testcase.atsGroup, "retries", 0) - if retries > 0: - statusDisplay += "/retry:%d" % retries - row.append(statusDisplay) - elif col == "Directory": - row.append(os.path.relpath(testcase.path)) - elif col == "TestCase": - row.append(testcase.name) - elif col == "TestStep": - row.append(label) - elif col == "OutFile": - row.append(pathstr) - elif col == "Elapsed": - row.append(ats.times.hms(test.elapsed)) - elif col == "Resources": - row.append(ats.times.hms(test.resources)) - else: - raise RuntimeError(f"Unknown column {col}") - - table.addRow(*row) - - table.addRowBreak() - - fp.write('\n') - if printTable: - table.printTable(fp) - fp.write('\n') - - def writeLongest(self, fp, num=5): - """The longer running tests are reported""" - - timing = [] - - for test in self.reportcases: - elapsed = test.elapsed - if elapsed > 0: - timing.append((elapsed, test)) - - timing = sorted(timing, reverse=True) - - if len(timing) > 0: - fp.write('\n') - fp.write('\n LONGEST RUNNING TESTS:') - for elapsed, test in timing[:num]: - fp.write(f" {ats.times.hms(elapsed)} {test.testcase.name}") - - -class ReportTextPeriodic(ReportText): - """This class is used during the periodic reports. It is - initialized with the actual ATS tests from the ATS manager object. 
- The report inherits from ReportText, and extend that behavior with - """ - - def __init__(self, atstests): - - self.atstest = atstests - testcases = list(set([test.geos_atsTestCase for test in atstests])) - ReportText.__init__(self, testcases) - - def report(self, startTime, totalProcessors=None): - self.writeSummary(sys.stdout, - (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, - INPROGRESS, FILTERED, RUNNING, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT)) - self.writeUtilization(sys.stdout, startTime, totalProcessors) - self.writeLongest(sys.stdout) - self.writeDetails(sys.stdout, (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, RUNNING), - ("Status", "TestCase", "Directory", "Elapsed", "Resources", "TestStep")) - - def writeUtilization(self, fp, startTime, totalProcessors=None): - """Machine utilization is reported""" - totalResourcesUsed = 0.0 - totaltime = time.time() - startTime - for test in self.reportcases: - elapsed = test.elapsed - resources = test.resources - totalResourcesUsed += resources - - if totalResourcesUsed > 0: - fp.write('\n') - fp.write(f"\n TOTAL TIME : {ats.times.hms( totaltime )}") - fp.write(f"\n TOTAL PROCESSOR-TIME : {ats.times.hms(totalResourcesUsed )}") - - if totalProcessors: - availableResources = totalProcessors * totaltime - utilization = totalResourcesUsed / availableResources * 100.0 - fp.write(f" AVAIL PROCESSOR-TIME : {ats.times.hms(availableResources )}") - fp.write(f" RESOURCE UTILIZATION : {utilization:5.3g}%") - - class ReportHTML(ReportBase): """HTML Reporting""" - # only launch a web browser once. - launchedBrowser = False - - def __init__(self, testcases): - ReportBase.__init__(self, testcases) - - self.reportcases = [ReportTestCase(t) for t in testcases] - - # A dictionary keyed by Status. 
The value is a list of ReportGroup - self.groupResults = None - - # A sorted list of all the ReportGroup - self.groups = None - - # Initialize the ReportGroups - self.initializeReportGroups() - - self.color = {} - self.color[FAILRUN] = "red" - self.color[FAILRUNOPTIONAL] = "yellow" - self.color[FAILCHECK] = "reddish" - self.color[FAILCHECKMINOR] = "reddish" - self.color[TIMEOUT] = "reddish" - self.color[NOTRUN] = "yellow" - self.color[INPROGRESS] = "blue" - self.color[FILTERED] = "blueish" - self.color[RUNNING] = "orange" - self.color[PASS] = "green" - self.color[SKIP] = "yellow" - self.color[BATCH] = "yellow" - self.color[NOTBUILT] = "blueish" - self.color[EXPECTEDFAIL] = "green" - self.color[UNEXPECTEDPASS] = "red" - - self.displayName = {} - self.displayName[FAILRUN] = "FAIL RUN" - self.displayName[FAILRUNOPTIONAL] = "FAIL RUN (OPTIONAL STEP)" - self.displayName[FAILCHECK] = "FAIL CHECK" - self.displayName[FAILCHECKMINOR] = "FAIL CHECK (MINOR)" - self.displayName[TIMEOUT] = "TIMEOUT" - self.displayName[NOTRUN] = "NOT RUN" - self.displayName[INPROGRESS] = "INPROGRESS" - self.displayName[FILTERED] = "FILTERED" - self.displayName[RUNNING] = "RUNNING" - self.displayName[PASS] = "PASSED" - self.displayName[SKIP] = "SKIPPED" - self.displayName[BATCH] = "BATCHED" - self.displayName[NOTBUILT] = "NOTBUILT" - self.displayName[EXPECTEDFAIL] = "EXPECTEDFAIL" - self.displayName[UNEXPECTEDPASS] = "UNEXPECTEDPASS" - - self.html_filename = config.report_html_file - - def initializeReportGroups(self): - testdir = {} - - # place testcases into groups - for reportcase in self.reportcases: - dirname = reportcase.testcase.dirname - if dirname not in testdir: - testdir[dirname] = [] - testdir[dirname].append(reportcase) - - self.groups = [ReportGroup(key, value) for key, value in testdir.items()] - - # place groups into a dictionary keyed on the group status - self.groupResults = {} - for status in STATUS: - self.groupResults[status] = [g for g in self.groups if g.status == status] - 
def report(self, refresh=0): - # potentially regenerate the html documentation for the test suite. - # # This doesn't seem to work: - # self.generateDocumentation() - sp = open(self.html_filename, 'w') - - if refresh: - if not any(g.status in (RUNNING, NOTRUN, INPROGRESS) for g in self.groups): - refresh = 0 - self.writeHeader(sp, refresh) self.writeSummary(sp) - if config.report_doc_link: - self.writeDoclink(sp) - - # Set the columns to display - if config.report_doc_link: - groupColumns = ("Name", "Custodian", "Status") - else: - groupColumns = ("Name", "Status") - - testcaseColumns = ("Status", "Name", "TestStep", "Age", "Elapsed", "Resources", "Output") - - # write the details - self.writeTable(sp, groupColumns, testcaseColumns) + self.writeTable(sp) self.writeFooter(sp) sp.close() - # launch the browser, if requested. - - self.browser() - - def writeRowHeader(self, sp, groupColumns, testcaseColumns): - header = f""" - - - - - - - """ - - for col in groupColumns: - if col == "Name": - header += '\n ' - elif col == "Custodian": - header += '\n ' - elif col == "Status": - header += '\n ' - else: - raise RuntimeError(f"Unknown column {col}") - - for col in testcaseColumns: - if col == "Status": - header += '\n ' - elif col == "Name": - header += '\n ' - elif col == "TestStep": - header += '\n ' - elif col == "Age": - header += '\n ' - elif col == "Elapsed": - header += '\n ' - elif col == "Resources": - header += '\n ' - elif col == "Output": - header += '\n ' - else: - raise RuntimeError(f"Unknown column {col}") - - header += """ - - """ - sp.write(header) - - def writeTable(self, sp, groupColumns, testcaseColumns): - colspan = len(groupColumns) + len(testcaseColumns) - header = f""" -
SUMMARY
{self.displayName[status]}
TEST GROUP TEST CASE
NAME CUSTODIAN STATUS STATUS NAME LAST
STEP
AGE ELAPSED RESOURCES OUTPUT
- - - - """ - - undocumented = [] - - rowcount = 0 - testgroups = [] - for status in STATUS: - testgroups.extend(self.groupResults[status]) - - for test in testgroups: - rowspan = len(test.testcases) - if rowcount <= 0: - self.writeRowHeader(sp, groupColumns, testcaseColumns) - rowcount += 30 - rowcount -= rowspan - - header += f""" - - - - """ - - elif col == "Custodian": - if config.report_doc_link: - owner = getowner(test.name, test.testcases[0].testcase) - if owner is not None: - header += f'\n ' - else: - header += f'\n ' - - elif col == "Status": - header += f'' - else: - raise RuntimeError(f"Unknown column {col}") - - for testcase in test.testcases: - for col in testcaseColumns: - - if col == "Status": - statusDisplay = self.displayName[testcase.status] - retries = getattr(testcase.testcase.atsGroup, "retries", 0) - if retries > 0: - statusDisplay += "
retry: %d" % retries - header += f'\n' - - elif col == "Name": - # If an .html file exists for this problem, create a reference to it - testref = "" - testlinksuffix = "" - if config.report_doc_link: - docfound = False - # first check for the full problem name, with the domain extension - testhtml = os.path.join(config.report_doc_dir, test.name, testcase.testcase.name + ".html") - if os.path.exists(testhtml): - docfound = True - else: - # next check for the full problem name without the domain extension - testhtml = os.path.join(config.report_doc_dir, test.name, - testcase.testcase.name + ".html") - if os.path.exists(testhtml): - docfound = True - else: - # final check for any of the input file names - for step in testcase.testcase.steps: - if getattr(step.p, "deck", None): - [inputname, suffix] = getattr(step.p, "deck").rsplit('.', 1) - testhtml = os.path.join(config.report_doc_dir, test.name, - inputname + ".html") - if os.path.exists(testhtml): - # match with the first input file - docfound = True - break - - if docfound: - testref = 'href="%s"' % (testhtml) - else: - if not testcase.testcase.nodoc: - testlinksuffix += '
undocumented' - undocumented.append(testcase.testcase.name) - - header += f"\n" - - elif col == "TestStep": - if testcase.laststep: - header += f"\n" - else: - header += "\n" - - elif col == "Age": - if not testcase.laststep: - header += "\n" - continue - - if testcase.diffage: - difftime = testcase.diffage - days = int(difftime) / 86400 - if days > 0: - difftime -= days * 86400 - hours = int(difftime) / 3600 - if days == 0: - # "New" diff file - don't color - header += f'\n' - elif days > 6: - # "Old" diff file (1+ week) - color reddish - header += f'\n' - else: - # less than a week old - but aging. Color yellowish - header += f'\n' - else: - header += "\n" - - elif col == "Elapsed": - if not testcase.elapsed: - header += "\n" - else: - header += f"\n" - - elif col == "Resources": - if not testcase.resources: - header += "\n" - else: - header += f"\n" - - elif col == "Output": - - header += "\n" - else: - raise RuntimeError(f"Unknown column {col}") - - header += '\n' - - header += '\n
DETAILED RESULTS
- """ - - for col in groupColumns: - if col == "Name": - header += f"""{test.name} -   {owner}  ' - header += '\n ?   {self.displayName[test.status]}{statusDisplay}{testcase.testcase.name}{testlinksuffix}{testcase.laststep.label()}    {hours}h{days}d{hours}h{days}d{hours}h    {ats.times.hms(testcase.elapsed)}  {ats.times.hms(testcase.resources)}" - seen = {} - for stepnum, step in enumerate(testcase.testcase.steps): - paths = testcase.testcase.resultPaths(step) - for p in paths: - # if p has already been accounted for, doesn't exist, or is an empty file, don't print it. - if (((p in seen) or not os.path.exists(p)) or (os.stat(p)[6] == 0)): - continue - header += f"\n{os.path.basename(p)}
" - seen[p] = 1 - header += "\n
' - - if config.report_doc_link: - header += '\n

Undocumented test problems:

' - header += '\n\n" - - sp.write(header) - def writeHeader(self, sp, refresh): gentime = time.strftime("%a, %d %b %Y %H:%M:%S") header = """ @@ -716,26 +171,24 @@ def writeHeader(self, sp, refresh): th,td {{ background-color:#EEEEEE }} td.probname {{ background-color: #CCCCCC; font-size: large ; text-align: center}} - td.red {{ background-color: #E10000; color: white }} - td.reddish {{ background-color: #FF6666; }} - td.orange {{ background-color: #FF9900; }} - td.orangish{{ background-color: #FFBB44; }} - td.yellow {{ background-color: #EDED00; }} - td.yellowish {{ background-color: #FFFF99; }} - td.green {{ background-color: #00C000; }} - td.greenyellow {{background-color: #99FF00; }} - td.blue {{ background-color: #0000FF; color: white }} - td.blueish {{ background-color: #33CCFF; }} - th.red {{ background-color: #E10000; color: white }} - th.reddish {{ background-color: #FF6666; }} - th.orange {{ background-color: #FF9900; }} - th.orangish{{ background-color: #FFBB44; }} - th.yellow {{ background-color: #EDED00; }} - th.yellowish {{ background-color: #FFFF99; }} - th.green {{ background-color: #00C000; }} - th.greenyellow {{background-color: #99FF00; }} - th.blue {{ background-color: #0000FF; color: white }} - th.blueish {{ background-color: #33CCFF; }} + + table {{ + font-family: arial, sans-serif; + border-collapse: collapse; + }} + + td {{ + border: 1px solid #dddddd; + text-align: left; + padding: 8px; + }} + + th {{ + border: 1px solid #dddddd; + background-color: #8f8f8f; + text-align: left; + padding: 8px; + }} @@ -753,94 +206,68 @@ def writeHeader(self, sp, refresh): else: username = os.getenv("USER") - header += f""" -

- - - -
- Test results: {gentime}
- User: {username}
- Platform: {platform}
- """ - - for line in config.report_notations: - header += f"{line}
" - - header += """
-

- """ - + header += "

GEOS ATS Report

\n

Configuration

\n" + table = [['Test Results', gentime], + ['User', username], + ['Platform', platform]] + header += tabulate(table, tablefmt='html') + header += '\n' sp.write(header) def writeSummary(self, sp): - summary = """ - - - - - - - - """ - - haveRetry = False - for status in STATUS: - cases = self.groupResults[status] - num = len(cases) - summary += f""" - - - - ' + table.append(row) - summary += '\n
SUMMARY
STATUS COUNT PROBLEM LIST
{self.displayName[status]}  {num} - """ - - if num > 0: - casestring = ' ' - for case in cases: - casename = case.name - caseref = case.name - retries = 0 - for test in case.testcases: - retries += getattr(test.testcase.atsGroup, "retries", 0) - if retries > 0: - haveRetry = True - casename += '*' - summary += f'\n {casename} ' - summary += '\n' - summary += casestring + link_pattern = '{}\n' + color_pattern = "

{}

" + header = ['Status', 'Count', 'Tests'] + table = [] + + for k, v in self.status_lists.items(): + status_formatted = color_pattern.format(COLORS[k], k) + test_links = [link_pattern.format(t, t) for t in v] + table.append([status_formatted, len(v), ', '.join(test_links)]) + + sp.write("\n\n

Summary

\n\n") + table_html = tabulate(table, headers=header, tablefmt='unsafehtml') + sp.write(table_html) + + def writeTable(self, sp): + header = ("Status", "Name", "TestStep", "Elapsed", "Resources", "Output") + + table = [] + table_filt = [] + file_pattern = "{}" + color_pattern = "

{}

" + + for k, v in self.test_results.items(): + status_str = v['status'].name + status_formatted = color_pattern.format(COLORS[status_str], k, status_str) + step_shortname = v['current_step'] + elapsed_formatted = hms(v['elapsed']) + output_files = [] + for s in v['steps'].values(): + if os.path.isfile(s['log']): + output_files.append(file_pattern.format(s['log'], os.path.basename(s['log']))) + if os.path.isfile(s['log'] + '.err'): + output_files.append(file_pattern.format(s['log'] + '.err', os.path.basename(s['log'] + '.err'))) + for pattern in s['output']: + for f in sorted(glob.glob(pattern)): + if (('restart' not in f) or ('.restartcheck' in f)) and os.path.isfile(f): + output_files.append(file_pattern.format(f, os.path.basename(f))) + + row = [status_formatted, k, step_shortname, elapsed_formatted, v['resources'], ', '.join(output_files)] + if status_str == 'FILTERED': + table_filt.append(row) else: - summary += '\n ' - - summary += '\n
' - if haveRetry: - summary += '\n* indicates that test was retried at least once.' + if len(table): + sp.write("\n\n

Active Tests

\n\n") + table_html = tabulate(table, headers=header, tablefmt='unsafehtml') + sp.write(table_html) - sp.write(summary) - - # Write link to documentation for html - def writeDoclink(self, sp): - doc = """ -

- Test problem names with a hyperlink have been documented, - the HTML version of which can be viewed by clicking on the link. - """ - - testdoc = os.path.join(config.report_doc_dir, 'testdoc.html') - testsumm = os.path.join(config.report_doc_dir, 'testdoc-summary.txt') - if os.path.exists(testdoc) and os.path.exists(testsumm): - doc += f""" -
- Or, you can click here for the - main page, or here for the - one page text summary. If the documentation appears out of - date, rerun 'atddoc' in this directory. - """ - - doc += '\n

' - sp.write(doc) + if len(table_filt): + sp.write("\n\n

Filtered Tests

\n\n") + table_html = tabulate(table_filt, headers=header, tablefmt='unsafehtml') + sp.write(table_html) def writeFooter(self, sp): footer = """ @@ -848,248 +275,3 @@ def writeFooter(self, sp): """ sp.write(footer) - - def browser(self): - if ReportHTML.launchedBrowser: - return - - if not config.browser: - return - - ReportHTML.launchedBrowser = True - command = config.browser_command.split() - command.append("file:%s" % config.report_html_file) - subprocess.Popen(command) - - -class ReportWait(ReportBase): - """This class is used while with the report_wait config option""" - - def __init__(self, testcases): - ReportBase.__init__(self, testcases) - self.testcases = testcases - - def report(self, fp): - """Write out the text report to the give file pointer""" - import time - - start = time.time() - sleeptime = 60 # interval to check (seconds) - - while True: - notdone = [] - for t in self.testcases: - t.testReport() - report = ReportTestCase(t) - if report.status in STATUS_NOTDONE: - notdone.append(t) - - if notdone: - rr = ReportText(self.testcases) - rr.writeSummary(sys.stdout, - (FAILRUN, UNEXPECTEDPASS, FAILRUNOPTIONAL, FAILCHECK, FAILCHECKMINOR, TIMEOUT, NOTRUN, - INPROGRESS, FILTERED, PASS, EXPECTEDFAIL, SKIP, BATCH, NOTBUILT)) - time.sleep(sleeptime) - else: - break - - -class ReportTestCase(object): - """This class represents the outcome from a TestCase. It hides - differences between off-line reports and the periodic reports - (when the actual ATS test object is known). In addition to - determining the testcase outcome, it also notes the last TestStep - that was run, age of the test, the total elapsed time and total - resources used.""" - - def __init__(self, testcase): - - self.testcase = testcase # test_case - self.status = None # One of the STATUS values (e.g. FAILRUN, PASS, etc.) 
- self.laststep = None - self.diffage = None - self.elapsed = 0.0 - self.resources = 0.0 - - now = time.time() - outcome = None - teststatus = testcase.status - - # The following algorithm determines the outcome for this testcase by looking at the TestCase's status object. - if teststatus is None: - self.status = NOTRUN - return - elif teststatus in (FILTERED, SKIP): - self.status = teststatus - return - else: - for stepnum, step in enumerate(testcase.steps): - - # Get the outcome and related information from the TestStep. - outcome, np, startTime, endTime = self._getStepInfo(step) - - if outcome == "PASS": - # So far so good, move on to the next step - dt = endTime - startTime - self.elapsed += dt - self.resources += np * dt - continue - if outcome == "EXPT": - dt = endTime - startTime - self.elapsed += dt - self.resources += np * dt - outcome = "EXPECTEDFAIL" - self.status = EXPECTEDFAIL - break # don't continue past an expected failure - if outcome == "UNEX": - dt = endTime - startTime - self.elapsed += dt - self.resources += np * dt - outcome = "UNEXPECTEDPASS" - self.status = UNEXPECTEDPASS - break # don't continue past an unexpected pass - elif outcome == "SKIP": - self.status = SKIP - break - elif outcome == "EXEC": - # the step is currently running, break - self.laststep = step - self.status = RUNNING - dt = now - startTime - self.elapsed += dt - self.resources += np * dt - break - - if outcome == "INIT" or outcome == "BACH": - if stepnum == 0: - # The TestCase is scheduled to run, but has not yet started. - if outcome == "BACH": - self.status = BATCH - else: - self.status = NOTRUN - - break - else: - # At least one step in the TestCase has started (and passed), but nothing is running now. 
- self.status = INPROGRESS - self.laststep = step - if endTime: - self.diffage = now - endTime - dt = endTime - startTime - self.elapsed += dt - self.resources += np * dt - break - elif outcome == "FILT": - # The test won't run because of a filter - self.status = FILTERED - else: - # One of the failure modes. - self.laststep = step - if endTime: - self.diffage = now - endTime - dt = endTime - startTime - self.elapsed += dt - self.resources += np * dt - if outcome == "TIME": - self.status = TIMEOUT - elif self.laststep.isCheck(): - if self.laststep.isMinor(): - self.status = FAILCHECKMINOR - else: - self.status = FAILCHECK - else: - if self.laststep.isMinor(): - self.status = FAILRUNOPTIONAL - else: - self.status = FAILRUN - try: - with open(step.p.stdout, 'r') as fp: - for line in fp: - if re.search(config.report_notbuilt_regexp, line): - self.status = NOTBUILT - break - except: - pass - break - - if outcome is None: - self.status = NOTRUN - - if outcome == "PASS": - # Don't set the laststep, but use it to get the endTime - self.status = PASS - laststep = step - self.diffage = 0.0 - # laststatus = teststatus.findStep(laststep) - # assert (laststatus) - # self.diffage = now - laststatus["endTime"] - - assert self.status in STATUS - - def _getStepInfo(self, teststep): - """This function hides the differences between the TestStatus - files and the information you can get from the ats test - object. 
It returns (status, np, startTime, endTime )""" - - atsTest = getattr(teststep, "atsTest", None) - endTime = None - startTime = None - - if atsTest is not None: - status = str(atsTest.status) - startTime = getattr(atsTest, "startTime", None) - endTime = getattr(atsTest, "endTime", None) - if status == "PASS" and atsTest.expectedResult == ats.FAILED: - status = "FAIL" - if status == "FAIL" and atsTest.expectedResult == ats.FAILED: - status = "UNEX" - else: - stepstatus = self.testcase.status.findStep(teststep) - if stepstatus is None: - status = "INIT" - else: - status = stepstatus["result"] - startTime = stepstatus["startTime"] - endTime = stepstatus["endTime"] - - np = getattr(teststep.p, "np", 1) - - if status in ("SKIP", "FILT", "INIT", "PASS", "FAIL", "TIME", "EXEC", "BACH", "EXPT", "UNEX"): - return (status, np, startTime, endTime) - else: - return ("SKIP", np, startTime, endTime) - - -class ReportGroup(object): - """A class to represent a group of TestCases. Currently, the only - grouping done is at the directory level: every testcase in a - directory belongs to the same ReportGroup.""" - - def __init__(self, groupName, testcases): - self.name = groupName - self.testcases = testcases - self.status = NOTRUN - if self.testcases: - self.status = min([case.status for case in self.testcases]) - assert self.status in STATUS - - def __cmp__(self, other): - return self.name == other.name - - -def getowner(dirname, testcase=None): - owner = "" - if not config.report_doc_link: - try: - atdfile = os.path.join(config.report_doc_dir, dirname, dirname + ".atd") - with open(atdfile, "r") as fp: - for line in fp: - match = re.search("CUSTODIAN:: +(.*)$", line) - if not match: - owner = match.group(1) - break - except IOError as e: - logger.debug(e) - if owner == "" and testcase and ("owner" in testcase.dictionary): - return testcase.dictionary["owner"] - return owner diff --git a/geos_ats_package/geos_ats/test_case.py b/geos_ats_package/geos_ats/test_case.py index 
fd9268f..a7a751a 100644 --- a/geos_ats_package/geos_ats/test_case.py +++ b/geos_ats_package/geos_ats/test_case.py @@ -1,23 +1,17 @@ import ats # type: ignore[import] import os -import sys import shutil -import errno import logging import glob import inspect from configparser import ConfigParser +from ats import atsut +from ats import (PASSED, FAILED, FILTERED, SKIPPED) +from geos_ats.common_utilities import Error, Log, removeLogDirectories +from geos_ats.configuration_record import config, globalTestTimings test = ats.manager.test testif = ats.manager.testif - -from geos_ats.suite_settings import testLabels, testOwners -from geos_ats.common_utilities import Error, Log, InfoTopic, TextTable, removeLogDirectories -from geos_ats.configuration_record import config, globalTestTimings -from geos_ats import reporting - -TESTS = {} -BASELINE_PATH = "baselines" logger = logging.getLogger('geos_ats') @@ -75,6 +69,7 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( self.dirname = os.getcwd() # Setup paths + log_dir = ats.tests.AtsTest.getOptions().get("logDir") working_relpath = os.path.relpath(self.dirname, ats_root_dir) working_root = ats.tests.AtsTest.getOptions().get("workingDir") working_dir = os.path.abspath(os.path.join(working_root, working_relpath, self.name)) @@ -92,35 +87,25 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( raise Exception() # Setup other parameters - self.atsGroup = None self.dictionary = {} self.dictionary.update(kw) self.nodoc = self.dictionary.get("nodoc", False) - self.status = None - self.outname = f"{self.name}.data" - self.errname = f"{self.name}.err" + self.last_status = None self.dictionary["name"] = self.name self.dictionary["test_directory"] = self.dirname self.dictionary["output_directory"] = working_dir self.dictionary["baseline_directory"] = baseline_directory - self.dictionary["testcase_out"] = self.outname - self.dictionary["testcase_err"] = self.errname + 
self.dictionary["log_directory"] = log_dir self.dictionary["testcase_name"] = self.name # Check for previous log information - log_dir = ats.tests.AtsTest.getOptions().get("logDir") log_file = os.path.join(log_dir, 'test_results.ini') if os.path.isfile(log_file): previous_config = ConfigParser() previous_config.read(log_file) for k, v in previous_config['Results'].items(): if self.name in v.split(';'): - self.status = reporting.status_map[k.upper()] - - if self.name in TESTS: - Error("Name already in use: %s" % self.name) - - TESTS[self.name] = self + self.last_status = atsut.StatusCode(k.upper()) # check for independent if config.override_np > 0: @@ -139,8 +124,6 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( # This check avoid testcases depending on themselves. self.depends = None - self.handleLabels(label, labels) - # complete the steps. # 1. update the steps with data from the dictionary # 2. substeps are inserted into the list of steps (the steps are flattened) @@ -155,7 +138,7 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( action = ats.tests.AtsTest.getOptions().get("action") if action in ("run", "rerun", "continue"): if self.dictionary.get("skip", None): - self.status = reporting.SKIP + self.status = SKIPPED return # Filtering tests on maxprocessors @@ -163,7 +146,7 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( if config.filter_maxprocessors != -1: if npMax > config.filter_maxprocessors: Log("# FILTER test=%s : max processors(%d > %d)" % (self.name, npMax, config.filter_maxprocessors)) - self.status = reporting.FILTERED + self.status = FILTERED return # Filtering tests on maxGPUS @@ -181,14 +164,14 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( if npMax > totalNumberOfProcessors: Log("# SKIP test=%s : not enough processors to run (%d > %d)" % (self.name, npMax, totalNumberOfProcessors)) - self.status = 
reporting.SKIP + self.status = SKIPPED return # If the machine doesn't specify a number of GPUs then it has none. totalNumberOfGPUs = getattr(ats.manager.machine, "getNumberOfGPUS", lambda: 1e90)() if ngpuMax > totalNumberOfGPUs: Log("# SKIP test=%s : not enough gpus to run (%d > %d)" % (self.name, ngpuMax, totalNumberOfGPUs)) - self.status = reporting.SKIP + self.status = SKIPPED return # filtering test steps based on action @@ -209,20 +192,6 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( reorderedSteps.append(step) self.steps = reorderedSteps - # filter based on previous results: - if action in ("run", "check", "continue"): - # if previously passed then skip - if self.status == reporting.PASS: - Log("# SKIP test=%s (previously passed)" % (self.name)) - # don't set status here, as we want the report to reflect the pass - return - - if action == "continue": - if self.status == reporting.FAILED: - Log("# SKIP test=%s (previously failed)" % (self.name)) - # don't set status here, as we want the report to reflect the pass - return - # Perform the action: if action in ("run", "continue"): Log("# run test=%s" % (self.name)) @@ -259,9 +228,12 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( else: Error("Unknown action?? %s" % action) + def logNames(self): + return sorted(glob.glob(os.path.join(self.dictionary["log_directory"], f'*{self.name}_*'))) + def resultPaths(self, step=None): """Return the paths to output files for the testcase. 
Used in reporting""" - paths = [self.outname, self.errname] + paths = [] if step: for x in step.resultPaths(): fullpath = os.path.join(self.path, x) @@ -270,11 +242,12 @@ def resultPaths(self, step=None): return paths + def cleanLogs(self): + for f in self.logNames(): + os.remove(f) + def testClean(self): - if os.path.exists(self.outname): - os.remove(self.outname) - if os.path.exists(self.errname): - os.remove(self.errname) + self.cleanLogs() for step in self.steps: step.clean() @@ -299,7 +272,6 @@ def _remove(path): # remove extra files if len(self.steps) > 0: _remove(config.report_html_file) - _remove(config.report_text_file) _remove(self.path) _remove("*.core") _remove("core") @@ -324,15 +296,8 @@ def findMaxNumberOfGPUs(self): return gpuMax def testCreate(self): - atsTest = None - keep = ats.tests.AtsTest.getOptions().get("keep") - - # remove outname - if os.path.exists(self.outname): - os.remove(self.outname) - if os.path.exists(self.errname): - os.remove(self.errname) - + # Remove old logs + self.cleanLogs() maxnp = 1 for stepnum, step in enumerate(self.steps): np = getattr(step.p, "np", 1) @@ -345,31 +310,14 @@ def testCreate(self): else: priority = 1 - # start a group + # Setup a new test group + atsTest = None ats.tests.AtsTest.newGroup(priority=priority) - - # keep a reference to the ats test group - self.atsGroup = ats.tests.AtsTest.group - - # if depends - if self.depends: - priorTestCase = TESTS.get(self.depends, None) - if priorTestCase is None: - Log("Warning: Test %s depends on testcase %s, which is not scheduled to run" % - (self.name, self.depends)) - else: - if priorTestCase.steps: - atsTest = getattr(priorTestCase.steps[-1], "atsTest", None) - for stepnum, step in enumerate(self.steps): - np = getattr(step.p, "np", 1) ngpu = getattr(step.p, "ngpu", 0) executable = step.executable() args = step.makeArgs() - - # set the label - # label = "%s/%s_%d_%s" % (self.dirname, self.name, stepnum + 1, step.label()) label = "%s_%d_%s" % (self.name, stepnum 
+ 1, step.label()) # call either 'test' or 'testif' @@ -378,12 +326,10 @@ def testCreate(self): else: func = lambda *a, **k: testif(atsTest, *a, **k) - # timelimit + # Set the time limit kw = {} - if self.batch.enabled: kw["timelimit"] = self.batch.duration - if (step.timelimit() and not config.override_timelimit): kw["timelimit"] = step.timelimit() else: @@ -398,23 +344,9 @@ def testCreate(self): independent=self.independent, batch=self.batch.enabled, **kw) + atsTest.step_outputs = step.resultPaths() - # ats test gets a reference to the TestStep and the TestCase - atsTest.geos_atsTestCase = self - atsTest.geos_atsTestStep = step - - # TestStep gets a reference to the atsTest - step.atsTest = atsTest - - # set the expected result - if step.expectedResult() == "FAIL" or step.expectedResult() is False: - atsTest.expectedResult = ats.FAILED - # The ATS does not permit tests to depend on failed tests. - # therefore we need to break here - self.steps = self.steps[:stepnum + 1] - break - - # end the group + # End the group ats.tests.AtsTest.endGroup() def commandLine(self, step): @@ -468,28 +400,12 @@ def testRebaseline(self): def testRebaselineFailed(self): config.rebaseline_ask = False - if self.status == reporting.FAILED: + if self.last_status == FAILED: self.testRebaseline() def testList(self): Log("# test=%s : labels=%s" % (self.name.ljust(32), " ".join(self.labels))) - def handleLabels(self, label, labels): - """set the labels, and verify they are known to the system, the avoid typos""" - if labels is not None and label is not None: - Error("specify only one of 'label' or 'labels'") - - if label is not None: - self.labels = [label] - elif labels is not None: - self.labels = labels - else: - self.labels = [] - - for x in self.labels: - if x not in testLabels: - Error(f"unknown label {x}. 
run 'geos_ats -i labels' for a list") - # Make available to the tests ats.manager.define(TestCase=TestCase) diff --git a/geos_ats_package/geos_ats/test_steps.py b/geos_ats_package/geos_ats/test_steps.py index 671465f..b1aa190 100644 --- a/geos_ats_package/geos_ats/test_steps.py +++ b/geos_ats_package/geos_ats/test_steps.py @@ -499,12 +499,8 @@ def makeArgs(self): return list(map(str, args)) def resultPaths(self): - paths = [] name = getGeosProblemName(self.p.deck, self.p.name) - paths += [os.path.join(self.p.output_directory, "%s_restart_*") % name] - paths += [os.path.join(self.p.output_directory, "silo*")] - paths += [os.path.join(self.p.output_directory, "%s_bp_*" % name)] - + paths = [os.path.join(self.p.output_directory, f"{name}_restart_*")] return paths def clean(self): @@ -759,9 +755,7 @@ def rebaseline(self): shutil.copyfile(self.target_file, self.baseline_file) def resultPaths(self): - figure_pattern = os.path.join(self.figure_root, '*.png') - figure_list = sorted(glob.glob(figure_pattern)) - return [self.target_file] + figure_list + return [self.target_file, os.path.join(self.figure_root, '*.png')] def clean(self): self._clean(self.resultPaths()) diff --git a/geos_ats_package/setup.cfg b/geos_ats_package/setup.cfg index 66a890a..2d64f43 100644 --- a/geos_ats_package/setup.cfg +++ b/geos_ats_package/setup.cfg @@ -16,6 +16,7 @@ install_requires = mpi4py numpy lxml + tabulate ats @ https://github.com/LLNL/ATS/archive/refs/tags/7.0.105.tar.gz python_requires = >=3.7 From 6035da8f1a710c41e63e15a539afaf616573ce69 Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Mon, 29 Jan 2024 12:14:53 -0800 Subject: [PATCH 03/14] Resolving merge conflicts --- geos_ats_package/geos_ats/common_utilities.py | 200 +++++++++--------- .../geos_ats/configuration_record.py | 4 +- .../geos_ats/machines/batchGeosatsMoab.py | 4 +- geos_ats_package/geos_ats/main.py | 2 +- geos_ats_package/geos_ats/reporting.py | 28 ++- geos_ats_package/geos_ats/test_case.py | 8 +- 
geos_ats_package/geos_ats/test_steps.py | 8 +- 7 files changed, 130 insertions(+), 124 deletions(-) diff --git a/geos_ats_package/geos_ats/common_utilities.py b/geos_ats_package/geos_ats/common_utilities.py index 3e32db1..7543df1 100644 --- a/geos_ats_package/geos_ats/common_utilities.py +++ b/geos_ats_package/geos_ats/common_utilities.py @@ -10,88 +10,88 @@ # Common code for displaying information to the user. ################################################################################ -logger = logging.getLogger( 'geos_ats' ) +logger = logging.getLogger('geos_ats') -def Error( msg ): - raise RuntimeError( "Error: %s" % msg ) +def Error(msg): + raise RuntimeError("Error: %s" % msg) -def Log( msg ): +def Log(msg): import ats # type: ignore[import] testmode = False try: - testmode = ats.tests.AtsTest.getOptions().get( "testmode" ) + testmode = ats.tests.AtsTest.getOptions().get("testmode") except AttributeError as e: - logger.debug( e ) + logger.debug(e) if testmode: - ats.log( "ALEATS: " + msg, echo=True ) + ats.log("ALEATS: " + msg, echo=True) else: - ats.log( msg, echo=True ) + ats.log(msg, echo=True) -class TextTable( object ): +class TextTable(object): - def __init__( self, columns ): + def __init__(self, columns): self.table = [] self.sep = " : " self.indent = " " self.columns = columns - self.colmax = [ None ] * columns + self.colmax = [None] * columns self.maxwidth = self._getwidth() self.rowbreak = None self.rowbreakstyle = " " - def _getwidth( self ): + def _getwidth(self): maxwidth = 100 if os.name == "posix": try: - sttyout = subprocess.Popen( [ "stty", "size" ], stdout=subprocess.PIPE ).communicate()[ 0 ] - maxwidth = int( sttyout.split()[ 1 ] ) + sttyout = subprocess.Popen(["stty", "size"], stdout=subprocess.PIPE).communicate()[0] + maxwidth = int(sttyout.split()[1]) except: # If the stty size approach does not work, the use a default maxwidth - logger.debug( "Using default maxwidth" ) + logger.debug("Using default maxwidth") return maxwidth - def 
setHeader( self, *row ): - assert ( len( row ) == self.columns ) - self.table.insert( 0, row ) - self.table.insert( 1, None ) + def setHeader(self, *row): + assert (len(row) == self.columns) + self.table.insert(0, row) + self.table.insert(1, None) - def addRowBreak( self ): - self.table.append( None ) + def addRowBreak(self): + self.table.append(None) - def addRow( self, *row ): - assert ( len( row ) == self.columns ) - self.table.append( row ) + def addRow(self, *row): + assert (len(row) == self.columns) + self.table.append(row) - def setColMax( self, colindex, max ): - self.colmax[ colindex ] = max + def setColMax(self, colindex, max): + self.colmax[colindex] = max - def printTable( self, outfile=sys.stdout ): + def printTable(self, outfile=sys.stdout): table_str = '' - if len( self.table ) == 0: + if len(self.table) == 0: return # find the max column sizes colWidth = [] - for i in range( self.columns ): - colWidth.append( max( [ len( str( row[ i ] ) ) for row in self.table if row is not None ] ) ) + for i in range(self.columns): + colWidth.append(max([len(str(row[i])) for row in self.table if row is not None])) # adjust the colWidths down if colmax is step - for i in range( self.columns ): - if self.colmax[ i ] is not None: - if colWidth[ i ] > self.colmax[ i ]: - colWidth[ i ] = self.colmax[ i ] + for i in range(self.columns): + if self.colmax[i] is not None: + if colWidth[i] > self.colmax[i]: + colWidth[i] = self.colmax[i] # last column is floating - total = sum( colWidth ) + self.columns * ( 1 + len( self.sep ) ) + len( self.indent ) + total = sum(colWidth) + self.columns * (1 + len(self.sep)) + len(self.indent) if total > self.maxwidth: - colWidth[ -1 ] = max( 10, self.maxwidth - ( total - colWidth[ -1 ] ) ) + colWidth[-1] = max(10, self.maxwidth - (total - colWidth[-1])) # output the table rowbreakindex = 0 @@ -99,14 +99,14 @@ def printTable( self, outfile=sys.stdout ): # row break controls. 
# if row is None then this is a break - addBreak = ( row is None ) or ( self.rowbreak and rowbreakindex > 0 and rowbreakindex % self.rowbreak == 0 ) + addBreak = (row is None) or (self.rowbreak and rowbreakindex > 0 and rowbreakindex % self.rowbreak == 0) if addBreak: table_str += self.indent - for i in range( self.columns ): + for i in range(self.columns): if i < self.columns - 1: table_str += f"{self.rowbreakstyle * colWidth[i]}{self.sep}" else: - table_str += self.rowbreakstyle * colWidth[ i ] + table_str += self.rowbreakstyle * colWidth[i] table_str += '\n' if row is None: @@ -118,27 +118,27 @@ def printTable( self, outfile=sys.stdout ): # determine how many lines are needed by each column of this row. lines = [] - for i in range( self.columns ): - if isinstance( row[ i ], str ): - drow = textwrap.dedent( row[ i ] ) + for i in range(self.columns): + if isinstance(row[i], str): + drow = textwrap.dedent(row[i]) else: - drow = str( row[ i ] ) + drow = str(row[i]) if i == self.columns - 1: - lines.append( textwrap.wrap( drow, colWidth[ i ], break_long_words=False ) ) + lines.append(textwrap.wrap(drow, colWidth[i], break_long_words=False)) else: - lines.append( textwrap.wrap( drow, colWidth[ i ], break_long_words=True ) ) + lines.append(textwrap.wrap(drow, colWidth[i], break_long_words=True)) - maxlines = max( [ len( x ) for x in lines ] ) + maxlines = max([len(x) for x in lines]) # output the row - for j in range( maxlines ): + for j in range(maxlines): table_str += self.indent - for i in range( self.columns ): - if len( lines[ i ] ) > j: - entry = lines[ i ][ j ].ljust( colWidth[ i ] ) + for i in range(self.columns): + if len(lines[i]) > j: + entry = lines[i][j].ljust(colWidth[i]) else: - entry = " ".ljust( colWidth[ i ] ) + entry = " ".ljust(colWidth[i]) if i < self.columns - 1: table_str += f"{entry}{self.sep}" @@ -147,108 +147,106 @@ def printTable( self, outfile=sys.stdout ): table_str += '\n' - outfile.write( table_str ) + outfile.write(table_str) -class 
InfoTopic( object ): +class InfoTopic(object): - def __init__( self, topic, outfile=sys.stdout ): + def __init__(self, topic, outfile=sys.stdout): self.topic = topic self.subtopics = [] self.outfile = outfile - def addTopic( self, topic, brief, function ): - self.subtopics.append( ( topic, brief, function ) ) + def addTopic(self, topic, brief, function): + self.subtopics.append((topic, brief, function)) - def startBanner( self ): - self.outfile.write( "=" * 80 + '\n' ) - self.outfile.write( self.topic.center( 80 ) ) - self.outfile.write( "\n" + "=" * 80 + '\n' ) + def startBanner(self): + self.outfile.write("=" * 80 + '\n') + self.outfile.write(self.topic.center(80)) + self.outfile.write("\n" + "=" * 80 + '\n') - def endBanner( self ): - self.outfile.write( "." * 80 + '\n' ) + def endBanner(self): + self.outfile.write("." * 80 + '\n') - def findTopic( self, topicName ): + def findTopic(self, topicName): for topic in self.subtopics: - if topic[ 0 ] == topicName: + if topic[0] == topicName: return topic return None - def displayMenu( self ): + def displayMenu(self): self.startBanner() - table = TextTable( 3 ) - for i, topic in enumerate( self.subtopics ): - table.addRow( i, topic[ 0 ], topic[ 1 ] ) + table = TextTable(3) + for i, topic in enumerate(self.subtopics): + table.addRow(i, topic[0], topic[1]) - table.addRow( i + 1, "exit", "" ) + table.addRow(i + 1, "exit", "") table.printTable() import ats - if ats.tests.AtsTest.getOptions().get( "testmode" ): + if ats.tests.AtsTest.getOptions().get("testmode"): return while True: - logger.info( "Enter a topic: " ) + logger.info("Enter a topic: ") sys.stdout.flush() try: line = sys.stdin.readline() except KeyboardInterrupt as e: - logger.debug( e ) + logger.debug(e) return None value = line.strip() - topic = self.findTopic( value ) + topic = self.findTopic(value) if topic: return topic try: - index = int( value ) - if index >= 0 and index < len( self.subtopics ): - return self.subtopics[ index ] - if index == len( 
self.subtopics ): + index = int(value) + if index >= 0 and index < len(self.subtopics): + return self.subtopics[index] + if index == len(self.subtopics): return None except ValueError as e: - logger.debug( e ) + logger.debug(e) - def process( self, args ): + def process(self, args): - if len( args ) == 0: + if len(args) == 0: topic = self.displayMenu() if topic is not None: - topic[ 2 ]() + topic[2]() else: - topicName = args[ 0 ] - topic = self.findTopic( topicName ) + topicName = args[0] + topic = self.findTopic(topicName) if topic: - topic[ 2 ]( *args[ 1: ] ) + topic[2](*args[1:]) else: - logger.warning( f"unknown topic: {topicName}" ) + logger.warning(f"unknown topic: {topicName}") -def removeLogDirectories( dir ): +def removeLogDirectories(dir): # look for subdirs containing 'ats.log' and 'geos_ats.config' # look for symlinks that point to such a directory - files = os.listdir( dir ) + files = os.listdir(dir) deldir = [] for f in files: - ff = os.path.join( dir, f ) - if os.path.isdir( ff ) and not os.path.islink( ff ): + ff = os.path.join(dir, f) + if os.path.isdir(ff) and not os.path.islink(ff): tests = [ - all( [ - os.path.exists( os.path.join( ff, "ats.log" ) ), - os.path.exists( os.path.join( ff, "geos_ats.config" ) ) - ] ), - f.find( "TestLogs." 
) == 0 + all([os.path.exists(os.path.join(ff, "ats.log")), + os.path.exists(os.path.join(ff, "geos_ats.config"))]), + f.find("TestLogs.") == 0 ] - if any( tests ): - deldir.append( ff ) - shutil.rmtree( ff ) + if any(tests): + deldir.append(ff) + shutil.rmtree(ff) for f in files: - ff = os.path.join( dir, f ) - if os.path.islink( ff ): - pointsto = os.path.realpath( ff ) + ff = os.path.join(dir, f) + if os.path.islink(ff): + pointsto = os.path.realpath(ff) if pointsto in deldir: - os.remove( ff ) + os.remove(ff) diff --git a/geos_ats_package/geos_ats/configuration_record.py b/geos_ats_package/geos_ats/configuration_record.py index 8d251c5..1d0291d 100644 --- a/geos_ats_package/geos_ats/configuration_record.py +++ b/geos_ats_package/geos_ats/configuration_record.py @@ -90,7 +90,7 @@ def __getattr__(self, name): # The global config object config = Config() # Global testTimings object -globalTestTimings = {} # type: ignore[var-annotated] +globalTestTimings = {} # type: ignore[var-annotated] # Depth of testconfig recursion configDepth = 0 @@ -98,7 +98,7 @@ def __getattr__(self, name): def infoConfigShow(public, outfile=sys.stdout): topic = InfoTopic("config show", outfile) topic.startBanner() - import ats # type: ignore[import] + import ats # type: ignore[import] keys = sorted(config._items.keys()) table = TextTable(3) diff --git a/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py b/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py index bc5747d..e2bb4e7 100644 --- a/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py +++ b/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py @@ -1,9 +1,9 @@ #BATS:batchGeosatsMoab batchGeosatsMoab BatchGeosatsMoab -1 -from ats import machines, configuration, log, atsut, times, AtsTest # type: ignore[import] +from ats import machines, configuration, log, atsut, times, AtsTest # type: ignore[import] import subprocess, sys, os, time, socket, re import utils # type: ignore[import] -from batch import BatchMachine # type: 
ignore[import] +from batch import BatchMachine # type: ignore[import] import logging debug = configuration.debug diff --git a/geos_ats_package/geos_ats/main.py b/geos_ats_package/geos_ats/main.py index b22af5b..033d0f5 100644 --- a/geos_ats_package/geos_ats/main.py +++ b/geos_ats_package/geos_ats/main.py @@ -327,7 +327,7 @@ def main(): geos_atsStartTime = time.time() # Note: the sys.argv is read here by default - import ats # type: ignore[import] + import ats # type: ignore[import] ats.manager.init() logger.debug('Copying options to the geos_ats config record file') config.copy_values(ats.manager.machine) diff --git a/geos_ats_package/geos_ats/reporting.py b/geos_ats_package/geos_ats/reporting.py index 633f5dd..1b35a0e 100644 --- a/geos_ats_package/geos_ats/reporting.py +++ b/geos_ats_package/geos_ats/reporting.py @@ -7,15 +7,13 @@ import glob import logging from ats.times import hms -from ats import (PASSED, FAILED, TIMEDOUT, EXPECTED, BATCHED, FILTERED, - SKIPPED, CREATED, RUNNING, HALTED, LSFERROR) +from ats import (PASSED, FAILED, TIMEDOUT, EXPECTED, BATCHED, FILTERED, SKIPPED, CREATED, RUNNING, HALTED, LSFERROR) # Get the active logger instance logger = logging.getLogger('geos_ats') # Status value in priority order -STATUS = (EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED, RUNNING, - PASSED, TIMEDOUT, HALTED, LSFERROR, FAILED) +STATUS = (EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED, RUNNING, PASSED, TIMEDOUT, HALTED, LSFERROR, FAILED) COLORS = {} COLORS[EXPECTED.name] = "black" @@ -47,15 +45,27 @@ def __init__(self, test_steps): for t in test_steps: # Parse the test step name - step_name = t.name[t.name.find('(')+1:t.name.rfind('_')] + step_name = t.name[t.name.find('(') + 1:t.name.rfind('_')] test_name = step_name[:step_name.rfind('_')] test_id = t.group.number group_name = test_name[:test_name.rfind('_')] # Save data if test_name not in self.test_results: - self.test_results[test_name] = {'steps': {}, 'status': EXPECTED, 'id': test_id, 'elapsed': 0.0, 
'current_step': ' ', 'resources': t.np} - self.test_results[test_name]['steps'][t.name] = {'status': t.status, 'log': t.outname, 'output': t.step_outputs, 'number': t.groupSerialNumber} + self.test_results[test_name] = { + 'steps': {}, + 'status': EXPECTED, + 'id': test_id, + 'elapsed': 0.0, + 'current_step': ' ', + 'resources': t.np + } + self.test_results[test_name]['steps'][t.name] = { + 'status': t.status, + 'log': t.outname, + 'output': t.step_outputs, + 'number': t.groupSerialNumber + } # Check elapsed time elapsed = 0.0 @@ -207,9 +217,7 @@ def writeHeader(self, sp, refresh): username = os.getenv("USER") header += "

GEOS ATS Report

\n

Configuration

\n" - table = [['Test Results', gentime], - ['User', username], - ['Platform', platform]] + table = [['Test Results', gentime], ['User', username], ['Platform', platform]] header += tabulate(table, tablefmt='html') header += '\n' sp.write(header) diff --git a/geos_ats_package/geos_ats/test_case.py b/geos_ats_package/geos_ats/test_case.py index a7a751a..2a43658 100644 --- a/geos_ats_package/geos_ats/test_case.py +++ b/geos_ats_package/geos_ats/test_case.py @@ -1,4 +1,4 @@ -import ats # type: ignore[import] +import ats # type: ignore[import] import os import shutil import logging @@ -33,8 +33,8 @@ def __init__(self, enabled=True, duration="1h", ppn=0, altname=None): logger.error(e) Error("bad time specification: %s" % duration) - self.ppn = ppn # processor per node - self.altname = altname # alternate name to use when launcing the batch job + self.ppn = ppn # processor per node + self.altname = altname # alternate name to use when launcing the batch job class TestCase(object): @@ -263,7 +263,7 @@ def _remove(path): else: os.remove(p) except OSError: - pass # so that two simultaneous clean operations don't fail + pass # so that two simultaneous clean operations don't fail # clean self.testClean() diff --git a/geos_ats_package/geos_ats/test_steps.py b/geos_ats_package/geos_ats/test_steps.py index b1aa190..3f471de 100644 --- a/geos_ats_package/geos_ats/test_steps.py +++ b/geos_ats_package/geos_ats/test_steps.py @@ -1,5 +1,5 @@ import os -import ats # type: ignore[import] +import ats # type: ignore[import] import glob import shutil import sys @@ -265,7 +265,7 @@ def _remove(self, paths, noclean): else: os.remove(p) except OSError as e: - logger.debug(e) # so that two simultaneous clean operations don't fail + logger.debug(e) # so that two simultaneous clean operations don't fail def getCheckOption(self): return ats.tests.AtsTest.getOptions().get("checkoption") @@ -388,8 +388,8 @@ class geos(TestStepBase): params = TestStepBase.defaultParams + ( 
TestStepBase.commonParams["name"], TestStepBase.commonParams["deck"], TestStepBase.commonParams["np"], TestStepBase.commonParams["ngpu"], TestStepBase.commonParams["check"], - TestStepBase.commonParams["test_directory"], TestStepBase.commonParams["baseline_directory"], TestStepBase.commonParams["output_directory"], - TestParam("restart_file", "The name of the restart file."), + TestStepBase.commonParams["test_directory"], TestStepBase.commonParams["baseline_directory"], + TestStepBase.commonParams["output_directory"], TestParam("restart_file", "The name of the restart file."), TestParam("x_partitions", "The number of partitions in the x direction."), TestParam("y_partitions", "The number of partitions in the y direction."), TestParam("z_partitions", From 2d9c5a4da5faaa4fcef1e6b49fb231b2cc3ed78c Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Mon, 22 Jan 2024 16:28:35 -0800 Subject: [PATCH 04/14] Removing unused file --- geos_ats_package/geos_ats/rules.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 geos_ats_package/geos_ats/rules.py diff --git a/geos_ats_package/geos_ats/rules.py b/geos_ats_package/geos_ats/rules.py deleted file mode 100644 index e69de29..0000000 From 8c5f93ccb722dbce0ef137e84974e60f4e7a28b8 Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Mon, 29 Jan 2024 12:15:10 -0800 Subject: [PATCH 05/14] Resolving merge conflicts --- .../geos_ats/environment_setup.py | 69 ++++++++++--------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/geos_ats_package/geos_ats/environment_setup.py b/geos_ats_package/geos_ats/environment_setup.py index 509373f..2864a8e 100644 --- a/geos_ats_package/geos_ats/environment_setup.py +++ b/geos_ats_package/geos_ats/environment_setup.py @@ -4,57 +4,58 @@ import argparse -def setup_ats( src_path, build_path, ats_xargs, ats_machine, ats_machine_dir ): - bin_dir = os.path.join( build_path, "bin" ) - geos_ats_fname = os.path.join( bin_dir, "run_geos_ats" ) - ats_dir = 
os.path.abspath( os.path.join( src_path, "integratedTests", "tests", "allTests" ) ) - test_path = os.path.join( build_path, "integratedTests" ) - link_path = os.path.join( test_path, "integratedTests" ) - run_script_fname = os.path.join( test_path, "geos_ats.sh" ) - log_dir = os.path.join( test_path, "TestResults" ) - - # Create a symbolic link to test directory - if os.path.islink( link_path ): - print( 'integratedTests symlink already exists' ) +def setup_ats(src_path, build_path, baseline_dir, working_dir, ats_xargs, ats_machine, ats_machine_dir): + bin_dir = os.path.join(build_path, "bin") + geos_ats_fname = os.path.join(bin_dir, "run_geos_ats") + test_path = os.path.join(build_path, "integratedTests") + link_path = os.path.join(test_path, "integratedTests") + run_script_fname = os.path.join(test_path, "geos_ats.sh") + log_dir = os.path.join(test_path, "TestResults") + baseline_dir = os.path.abspath(baseline_dir) + working_dir = os.path.abspath(working_dir) + ats_main_file = os.path.abspath(os.path.join(src_path, 'inputFiles', 'main.ats')) + + # Create a symbolic link to working directory + os.makedirs(working_dir, exist_ok=True) + if os.path.islink(link_path): + print('integratedTests symlink already exists') else: - os.symlink( ats_dir, link_path ) + os.symlink(working_dir, link_path) # Build extra arguments that should be passed to ATS - joined_args = [ ' '.join( x ) for x in ats_xargs ] - ats_args = ' '.join( [ f'--ats {x}' for x in joined_args ] ) + joined_args = [' '.join(x) for x in ats_xargs] + ats_args = ' '.join([f'--ats {x}' for x in joined_args]) if ats_machine: ats_args += f' --machine {ats_machine}' if ats_machine_dir: ats_args += f' --machine-dir {ats_machine_dir}' # Write the bash script to run ats. 
- with open( run_script_fname, "w" ) as g: - g.write( "#!/bin/bash\n" ) - g.write( f"{geos_ats_fname} {bin_dir} --workingDir {ats_dir} --logs {log_dir} {ats_args} \"$@\"\n" ) + with open(run_script_fname, "w") as g: + g.write("#!/bin/bash\n") + g.write(f"{geos_ats_fname} {bin_dir} {ats_main_file} --workingDir {working_dir} --baselineDir {baseline_dir} --logs {log_dir} {ats_args} \"$@\"\n") # Make the script executable - st = os.stat( run_script_fname ) - os.chmod( run_script_fname, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH ) + st = os.stat(run_script_fname) + os.chmod(run_script_fname, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) def main(): # Cmake may combine the final arguments into a string literal # Manually unpack those before parsing - final_arg = sys.argv.pop( -1 ) - sys.argv.extend( final_arg.split() ) - - parser = argparse.ArgumentParser( description="Setup ATS script" ) - parser.add_argument( "src_path", type=str, help="GEOS src path" ) - parser.add_argument( "build_path", type=str, help="GEOS build path" ) - parser.add_argument( "--ats", - nargs='+', - default=[], - action="append", - help="Arguments that should be passed to ats" ) - parser.add_argument( "--machine", type=str, default='', help="ATS machine name" ) - parser.add_argument( "--machine-dir", type=str, default='', help="ATS machine directory" ) + final_arg = sys.argv.pop(-1) + sys.argv.extend(final_arg.split()) + + parser = argparse.ArgumentParser(description="Setup ATS script") + parser.add_argument("src_path", type=str, help="GEOS src path") + parser.add_argument("build_path", type=str, help="GEOS build path") + parser.add_argument("baseline_dir", type=str, help="GEOS test baseline root directory") + parser.add_argument("working_dir", type=str, help="GEOS test working root directory") + parser.add_argument("--ats", nargs='+', default=[], action="append", help="Arguments that should be passed to ats") + parser.add_argument("--machine", type=str, default='', 
help="ATS machine name") + parser.add_argument("--machine-dir", type=str, default='', help="ATS machine directory") options, unkown_args = parser.parse_known_args() - setup_ats( options.src_path, options.build_path, options.ats, options.machine, options.machine_dir ) + setup_ats(options.src_path, options.build_path, options.baseline_dir, options.working_dir, options.ats, options.machine, options.machine_dir) if __name__ == '__main__': From 74e08d7d0d00d9eeadf04c7a0a65303b2cf31b4a Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Mon, 29 Jan 2024 12:15:29 -0800 Subject: [PATCH 06/14] Resolving merge conflicts --- .../geos_ats/command_line_parsers.py | 2 + geos_ats_package/geos_ats/main.py | 7 +- geos_ats_package/geos_ats/test_builder.py | 145 ++++++++++-------- 3 files changed, 88 insertions(+), 66 deletions(-) diff --git a/geos_ats_package/geos_ats/command_line_parsers.py b/geos_ats_package/geos_ats/command_line_parsers.py index 1c3ffc7..2af931d 100644 --- a/geos_ats_package/geos_ats/command_line_parsers.py +++ b/geos_ats_package/geos_ats/command_line_parsers.py @@ -84,6 +84,8 @@ def build_command_line_parser(): parser.add_argument("-l", "--logs", type=str, default=None) + parser.add_argument("-f", "--allow-failed-tests", default=False, action='store_true') + parser.add_argument( "--failIfTestsFail", action="store_true", diff --git a/geos_ats_package/geos_ats/main.py b/geos_ats_package/geos_ats/main.py index 033d0f5..f98c372 100644 --- a/geos_ats_package/geos_ats/main.py +++ b/geos_ats_package/geos_ats/main.py @@ -5,7 +5,7 @@ import subprocess import time import logging -from geos_ats import command_line_parsers +from geos_ats import command_line_parsers, test_builder test_actions = ("run", "rerun", "check", "continue") report_actions = ("run", "rerun", "report", "continue") @@ -369,6 +369,11 @@ def main(): # Run ATS # --------------------------------- result = ats.manager.core() + if len(test_builder.test_build_failures): + tmp = ', 
'.join(test_builder.test_build_failures) + logger.error(f'The following ATS test failed to build: {tmp}') + if not options.allow_failed_tests: + raise Exception('Some tests failed to build') # Make sure all the testcases requested were found if testcases != "all": diff --git a/geos_ats_package/geos_ats/test_builder.py b/geos_ats_package/geos_ats/test_builder.py index 83f9ca7..156a85c 100644 --- a/geos_ats_package/geos_ats/test_builder.py +++ b/geos_ats_package/geos_ats/test_builder.py @@ -7,43 +7,47 @@ from dataclasses import dataclass, asdict from ats.tests import AtsTest from lxml import etree +import logging from .test_steps import geos from .test_case import TestCase +test_build_failures = [] +logger = logging.getLogger('geos_ats') -@dataclass( frozen=True ) + +@dataclass(frozen=True) class RestartcheckParameters: atol: float rtol: float - def as_dict( self ): - return asdict( self ) + def as_dict(self): + return asdict(self) -@dataclass( frozen=True ) +@dataclass(frozen=True) class CurveCheckParameters: filename: str - tolerance: Iterable[ float ] - curves: List[ List[ str ] ] - script_instructions: Iterable[ Iterable[ str ] ] = None + tolerance: Iterable[float] + curves: List[List[str]] + script_instructions: Iterable[Iterable[str]] = None time_units: str = "seconds" - def as_dict( self ): - return asdict( self ) + def as_dict(self): + return asdict(self) -@dataclass( frozen=True ) +@dataclass(frozen=True) class TestDeck: name: str description: str - partitions: Iterable[ Tuple[ int, int, int ] ] + partitions: Iterable[Tuple[int, int, int]] restart_step: int check_step: int restartcheck_params: RestartcheckParameters = None curvecheck_params: CurveCheckParameters = None -def collect_block_names( fname ): +def collect_block_names(fname): """ Collect block names in an xml file @@ -54,35 +58,35 @@ def collect_block_names( fname ): dict: Pairs of top-level block names and lists of child block names """ pwd = os.getcwd() - actual_dir, actual_fname = os.path.split( 
os.path.realpath( fname ) ) - os.chdir( actual_dir ) + actual_dir, actual_fname = os.path.split(os.path.realpath(fname)) + os.chdir(actual_dir) # Collect the block names in this file results = {} - parser = etree.XMLParser( remove_comments=True ) - tree = etree.parse( actual_fname, parser=parser ) + parser = etree.XMLParser(remove_comments=True) + tree = etree.parse(actual_fname, parser=parser) root = tree.getroot() for child in root.getchildren(): - results[ child.tag ] = [ grandchild.tag for grandchild in child.getchildren() ] + results[child.tag] = [grandchild.tag for grandchild in child.getchildren()] # Collect block names in included files - for included_root in root.findall( 'Included' ): - for included_file in included_root.findall( 'File' ): - f = included_file.get( 'name' ) - child_results = collect_block_names( f ) + for included_root in root.findall('Included'): + for included_file in included_root.findall('File'): + f = included_file.get('name') + child_results = collect_block_names(f) for k, v in child_results.items(): if k in results: - results[ k ].extend( v ) + results[k].extend(v) else: - results[ k ] = v - os.chdir( pwd ) + results[k] = v + os.chdir(pwd) return results -def generate_geos_tests( decks: Iterable[ TestDeck ] ): +def generate_geos_tests(decks: Iterable[TestDeck], test_type='smoke'): """ """ - for ii, deck in enumerate( decks ): + for ii, deck in enumerate(decks): restartcheck_params = None curvecheck_params = None @@ -97,51 +101,62 @@ def generate_geos_tests( decks: Iterable[ TestDeck ] ): nx, ny, nz = partition N = nx * ny * nz - testcase_name = "{}_{:02d}".format( deck.name, N ) - base_name = "0to{:d}".format( deck.check_step ) - xml_file = "{}.xml".format( deck.name ) - xml_blocks = collect_block_names( xml_file ) + testcase_name = "{}_{:02d}".format(deck.name, N) + base_name = "0to{:d}".format(deck.check_step) + + # Search for the target xml file + xml_file = '' + for suffix in ['', f'_{test_type}']: + if 
os.path.isfile("{}{}.xml".format(deck.name, suffix)): + xml_file = "{}{}.xml".format(deck.name, suffix) + + if not xml_file: + logger.error(f'Could not find a matching xml file for the test: {deck.name}') + test_build_failures.append(deck.name) + continue + + xml_blocks = collect_block_names(xml_file) checks = [] if curvecheck_params: - checks.append( 'curve' ) + checks.append('curve') steps = [ - geos( deck=xml_file, - name=base_name, - np=N, - ngpu=N, - x_partitions=nx, - y_partitions=ny, - z_partitions=nz, - restartcheck_params=restartcheck_params, - curvecheck_params=curvecheck_params ) + geos(deck=xml_file, + name=base_name, + np=N, + ngpu=N, + x_partitions=nx, + y_partitions=ny, + z_partitions=nz, + restartcheck_params=restartcheck_params, + curvecheck_params=curvecheck_params) ] if deck.restart_step > 0: - checks.append( 'restart' ) + checks.append('restart') steps.append( - geos( deck=xml_file, - name="{:d}to{:d}".format( deck.restart_step, deck.check_step ), - np=N, - ngpu=N, - x_partitions=nx, - y_partitions=ny, - z_partitions=nz, - restart_file=os.path.join( testcase_name, - "{}_restart_{:09d}".format( base_name, deck.restart_step ) ), - baseline_pattern=f"{base_name}_restart_[0-9]+\.root", - allow_rebaseline=False, - restartcheck_params=restartcheck_params ) ) - - AtsTest.stick( level=ii ) - AtsTest.stick( checks=','.join( checks ) ) - AtsTest.stick( solvers=','.join( xml_blocks.get( 'Solvers', [] ) ) ) - AtsTest.stick( outputs=','.join( xml_blocks.get( 'Outputs', [] ) ) ) - AtsTest.stick( constitutive_models=','.join( xml_blocks.get( 'Constitutive', [] ) ) ) - TestCase( name=testcase_name, - desc=deck.description, - label="auto", - owner="GEOS team", - independent=True, - steps=steps ) + geos(deck=xml_file, + name="{:d}to{:d}".format(deck.restart_step, deck.check_step), + np=N, + ngpu=N, + x_partitions=nx, + y_partitions=ny, + z_partitions=nz, + restart_file=os.path.join(testcase_name, + "{}_restart_{:09d}".format(base_name, deck.restart_step)), + 
baseline_pattern=f"{base_name}_restart_[0-9]+\.root", + allow_rebaseline=False, + restartcheck_params=restartcheck_params)) + + AtsTest.stick(level=ii) + AtsTest.stick(checks=','.join(checks)) + AtsTest.stick(solvers=','.join(xml_blocks.get('Solvers', []))) + AtsTest.stick(outputs=','.join(xml_blocks.get('Outputs', []))) + AtsTest.stick(constitutive_models=','.join(xml_blocks.get('Constitutive', []))) + TestCase(name=testcase_name, + desc=deck.description, + label="auto", + owner="GEOS team", + independent=True, + steps=steps) From a3608d00f936720eb84835495e826666474b7ee0 Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Mon, 29 Jan 2024 12:16:15 -0800 Subject: [PATCH 07/14] Appyling yapf --- .../geos_ats/command_line_parsers.py | 81 ++- geos_ats_package/geos_ats/common_utilities.py | 200 ++--- .../geos_ats/configuration_record.py | 327 ++++----- .../geos_ats/environment_setup.py | 73 +- .../geos_ats/machines/batchGeosatsMoab.py | 72 +- geos_ats_package/geos_ats/main.py | 345 ++++----- geos_ats_package/geos_ats/reporting.py | 217 +++--- geos_ats_package/geos_ats/test_builder.py | 140 ++-- geos_ats_package/geos_ats/test_case.py | 312 ++++---- geos_ats_package/geos_ats/test_steps.py | 688 +++++++++--------- 10 files changed, 1242 insertions(+), 1213 deletions(-) diff --git a/geos_ats_package/geos_ats/command_line_parsers.py b/geos_ats_package/geos_ats/command_line_parsers.py index 2af931d..09ead6f 100644 --- a/geos_ats_package/geos_ats/command_line_parsers.py +++ b/geos_ats_package/geos_ats/command_line_parsers.py @@ -36,55 +36,64 @@ def build_command_line_parser(): - parser = argparse.ArgumentParser(description="Runs GEOS integrated tests") + parser = argparse.ArgumentParser( description="Runs GEOS integrated tests" ) - parser.add_argument("geos_bin_dir", type=str, help="GEOS binary directory.") + parser.add_argument( "geos_bin_dir", type=str, help="GEOS binary directory." 
) - parser.add_argument("ats_target", type=str, help="ats file") + parser.add_argument( "ats_target", type=str, help="ats file" ) - parser.add_argument("-w", "--workingDir", type=str, help="Root working directory") + parser.add_argument( "-w", "--workingDir", type=str, help="Root working directory" ) - parser.add_argument("-b", "--baselineDir", type=str, help="Root baseline directory") + parser.add_argument( "-b", "--baselineDir", type=str, help="Root baseline directory" ) - action_names = ','.join(action_options.keys()) - parser.add_argument("-a", "--action", type=str, default="run", help=f"Test actions options ({action_names})") + action_names = ','.join( action_options.keys() ) + parser.add_argument( "-a", "--action", type=str, default="run", help=f"Test actions options ({action_names})" ) - check_names = ','.join(check_options.keys()) - parser.add_argument("-c", "--check", type=str, default="all", help=f"Test check options ({check_names})") + check_names = ','.join( check_options.keys() ) + parser.add_argument( "-c", "--check", type=str, default="all", help=f"Test check options ({check_names})" ) - verbosity_names = ','.join(verbose_options.keys()) - parser.add_argument("-v", "--verbose", type=str, default="info", help=f"Log verbosity options ({verbosity_names})") + verbosity_names = ','.join( verbose_options.keys() ) + parser.add_argument( "-v", + "--verbose", + type=str, + default="info", + help=f"Log verbosity options ({verbosity_names})" ) - parser.add_argument("-d", "--detail", action="store_true", default=False, help="Show detailed action/check options") + parser.add_argument( "-d", + "--detail", + action="store_true", + default=False, + help="Show detailed action/check options" ) - parser.add_argument("-i", "--info", action="store_true", default=False, help="Info on various topics") + parser.add_argument( "-i", "--info", action="store_true", default=False, help="Info on various topics" ) - parser.add_argument("-r", - "--restartCheckOverrides", - 
nargs='+', - action='append', - help='Restart check parameter override (name value)', - default=[]) + parser.add_argument( "-r", + "--restartCheckOverrides", + nargs='+', + action='append', + help='Restart check parameter override (name value)', + default=[] ) - parser.add_argument("--salloc", - default=True, - help="Used by the chaosM machine to first allocate nodes with salloc, before running the tests") + parser.add_argument( + "--salloc", + default=True, + help="Used by the chaosM machine to first allocate nodes with salloc, before running the tests" ) parser.add_argument( "--sallocoptions", type=str, default="", - help="Used to override all command-line options for salloc. No other options with be used or added.") + help="Used to override all command-line options for salloc. No other options with be used or added." ) - parser.add_argument("--ats", nargs='+', default=[], action="append", help="pass arguments to ats") + parser.add_argument( "--ats", nargs='+', default=[], action="append", help="pass arguments to ats" ) - parser.add_argument("--machine", default=None, help="name of the machine") + parser.add_argument( "--machine", default=None, help="name of the machine" ) - parser.add_argument("--machine-dir", default=None, help="Search path for machine definitions") + parser.add_argument( "--machine-dir", default=None, help="Search path for machine definitions" ) - parser.add_argument("-l", "--logs", type=str, default=None) + parser.add_argument( "-l", "--logs", type=str, default=None ) - parser.add_argument("-f", "--allow-failed-tests", default=False, action='store_true') + parser.add_argument( "-f", "--allow-failed-tests", default=False, action='store_true' ) parser.add_argument( "--failIfTestsFail", @@ -93,12 +102,12 @@ def build_command_line_parser(): help="geos_ats normally exits with 0. This will cause it to exit with an error code if there was a failed test." 
) - parser.add_argument("-n", "-N", "--numNodes", type=int, default="2") + parser.add_argument( "-n", "-N", "--numNodes", type=int, default="2" ) return parser -def parse_command_line_arguments(args): +def parse_command_line_arguments( args ): parser = build_command_line_parser() options, unkown_args = parser.parse_known_args() exit_flag = False @@ -107,7 +116,7 @@ def parse_command_line_arguments(args): check = options.check if check not in check_options: print( - f"Selected check option ({check}) not recognized. Try running with --help/--details for more information") + f"Selected check option ({check}) not recognized. Try running with --help/--details for more information" ) exit_flag = True action = options.action @@ -119,22 +128,22 @@ def parse_command_line_arguments(args): verbose = options.verbose if verbose not in verbose_options: - print(f"Selected verbose option ({verbose}) not recognized") + print( f"Selected verbose option ({verbose}) not recognized" ) exit_flag = True # Paths if not options.workingDir: - options.workingDir = os.path.basename(options.ats_target) + options.workingDir = os.path.basename( options.ats_target ) if not options.baselineDir: options.baselineDir = options.workingDir # Print detailed information if options.detail: - for option_type, details in zip(['action', 'check'], [action_options, check_options]): - print(f'\nAvailable {option_type} options:') + for option_type, details in zip( [ 'action', 'check' ], [ action_options, check_options ] ): + print( f'\nAvailable {option_type} options:' ) for k, v in details.items(): - print(f' {k}: {v}') + print( f' {k}: {v}' ) exit_flag = True if exit_flag: diff --git a/geos_ats_package/geos_ats/common_utilities.py b/geos_ats_package/geos_ats/common_utilities.py index 7543df1..3e32db1 100644 --- a/geos_ats_package/geos_ats/common_utilities.py +++ b/geos_ats_package/geos_ats/common_utilities.py @@ -10,88 +10,88 @@ # Common code for displaying information to the user. 
################################################################################ -logger = logging.getLogger('geos_ats') +logger = logging.getLogger( 'geos_ats' ) -def Error(msg): - raise RuntimeError("Error: %s" % msg) +def Error( msg ): + raise RuntimeError( "Error: %s" % msg ) -def Log(msg): +def Log( msg ): import ats # type: ignore[import] testmode = False try: - testmode = ats.tests.AtsTest.getOptions().get("testmode") + testmode = ats.tests.AtsTest.getOptions().get( "testmode" ) except AttributeError as e: - logger.debug(e) + logger.debug( e ) if testmode: - ats.log("ALEATS: " + msg, echo=True) + ats.log( "ALEATS: " + msg, echo=True ) else: - ats.log(msg, echo=True) + ats.log( msg, echo=True ) -class TextTable(object): +class TextTable( object ): - def __init__(self, columns): + def __init__( self, columns ): self.table = [] self.sep = " : " self.indent = " " self.columns = columns - self.colmax = [None] * columns + self.colmax = [ None ] * columns self.maxwidth = self._getwidth() self.rowbreak = None self.rowbreakstyle = " " - def _getwidth(self): + def _getwidth( self ): maxwidth = 100 if os.name == "posix": try: - sttyout = subprocess.Popen(["stty", "size"], stdout=subprocess.PIPE).communicate()[0] - maxwidth = int(sttyout.split()[1]) + sttyout = subprocess.Popen( [ "stty", "size" ], stdout=subprocess.PIPE ).communicate()[ 0 ] + maxwidth = int( sttyout.split()[ 1 ] ) except: # If the stty size approach does not work, the use a default maxwidth - logger.debug("Using default maxwidth") + logger.debug( "Using default maxwidth" ) return maxwidth - def setHeader(self, *row): - assert (len(row) == self.columns) - self.table.insert(0, row) - self.table.insert(1, None) + def setHeader( self, *row ): + assert ( len( row ) == self.columns ) + self.table.insert( 0, row ) + self.table.insert( 1, None ) - def addRowBreak(self): - self.table.append(None) + def addRowBreak( self ): + self.table.append( None ) - def addRow(self, *row): - assert (len(row) == self.columns) 
- self.table.append(row) + def addRow( self, *row ): + assert ( len( row ) == self.columns ) + self.table.append( row ) - def setColMax(self, colindex, max): - self.colmax[colindex] = max + def setColMax( self, colindex, max ): + self.colmax[ colindex ] = max - def printTable(self, outfile=sys.stdout): + def printTable( self, outfile=sys.stdout ): table_str = '' - if len(self.table) == 0: + if len( self.table ) == 0: return # find the max column sizes colWidth = [] - for i in range(self.columns): - colWidth.append(max([len(str(row[i])) for row in self.table if row is not None])) + for i in range( self.columns ): + colWidth.append( max( [ len( str( row[ i ] ) ) for row in self.table if row is not None ] ) ) # adjust the colWidths down if colmax is step - for i in range(self.columns): - if self.colmax[i] is not None: - if colWidth[i] > self.colmax[i]: - colWidth[i] = self.colmax[i] + for i in range( self.columns ): + if self.colmax[ i ] is not None: + if colWidth[ i ] > self.colmax[ i ]: + colWidth[ i ] = self.colmax[ i ] # last column is floating - total = sum(colWidth) + self.columns * (1 + len(self.sep)) + len(self.indent) + total = sum( colWidth ) + self.columns * ( 1 + len( self.sep ) ) + len( self.indent ) if total > self.maxwidth: - colWidth[-1] = max(10, self.maxwidth - (total - colWidth[-1])) + colWidth[ -1 ] = max( 10, self.maxwidth - ( total - colWidth[ -1 ] ) ) # output the table rowbreakindex = 0 @@ -99,14 +99,14 @@ def printTable(self, outfile=sys.stdout): # row break controls. 
# if row is None then this is a break - addBreak = (row is None) or (self.rowbreak and rowbreakindex > 0 and rowbreakindex % self.rowbreak == 0) + addBreak = ( row is None ) or ( self.rowbreak and rowbreakindex > 0 and rowbreakindex % self.rowbreak == 0 ) if addBreak: table_str += self.indent - for i in range(self.columns): + for i in range( self.columns ): if i < self.columns - 1: table_str += f"{self.rowbreakstyle * colWidth[i]}{self.sep}" else: - table_str += self.rowbreakstyle * colWidth[i] + table_str += self.rowbreakstyle * colWidth[ i ] table_str += '\n' if row is None: @@ -118,27 +118,27 @@ def printTable(self, outfile=sys.stdout): # determine how many lines are needed by each column of this row. lines = [] - for i in range(self.columns): - if isinstance(row[i], str): - drow = textwrap.dedent(row[i]) + for i in range( self.columns ): + if isinstance( row[ i ], str ): + drow = textwrap.dedent( row[ i ] ) else: - drow = str(row[i]) + drow = str( row[ i ] ) if i == self.columns - 1: - lines.append(textwrap.wrap(drow, colWidth[i], break_long_words=False)) + lines.append( textwrap.wrap( drow, colWidth[ i ], break_long_words=False ) ) else: - lines.append(textwrap.wrap(drow, colWidth[i], break_long_words=True)) + lines.append( textwrap.wrap( drow, colWidth[ i ], break_long_words=True ) ) - maxlines = max([len(x) for x in lines]) + maxlines = max( [ len( x ) for x in lines ] ) # output the row - for j in range(maxlines): + for j in range( maxlines ): table_str += self.indent - for i in range(self.columns): - if len(lines[i]) > j: - entry = lines[i][j].ljust(colWidth[i]) + for i in range( self.columns ): + if len( lines[ i ] ) > j: + entry = lines[ i ][ j ].ljust( colWidth[ i ] ) else: - entry = " ".ljust(colWidth[i]) + entry = " ".ljust( colWidth[ i ] ) if i < self.columns - 1: table_str += f"{entry}{self.sep}" @@ -147,106 +147,108 @@ def printTable(self, outfile=sys.stdout): table_str += '\n' - outfile.write(table_str) + outfile.write( table_str ) -class 
InfoTopic(object): +class InfoTopic( object ): - def __init__(self, topic, outfile=sys.stdout): + def __init__( self, topic, outfile=sys.stdout ): self.topic = topic self.subtopics = [] self.outfile = outfile - def addTopic(self, topic, brief, function): - self.subtopics.append((topic, brief, function)) + def addTopic( self, topic, brief, function ): + self.subtopics.append( ( topic, brief, function ) ) - def startBanner(self): - self.outfile.write("=" * 80 + '\n') - self.outfile.write(self.topic.center(80)) - self.outfile.write("\n" + "=" * 80 + '\n') + def startBanner( self ): + self.outfile.write( "=" * 80 + '\n' ) + self.outfile.write( self.topic.center( 80 ) ) + self.outfile.write( "\n" + "=" * 80 + '\n' ) - def endBanner(self): - self.outfile.write("." * 80 + '\n') + def endBanner( self ): + self.outfile.write( "." * 80 + '\n' ) - def findTopic(self, topicName): + def findTopic( self, topicName ): for topic in self.subtopics: - if topic[0] == topicName: + if topic[ 0 ] == topicName: return topic return None - def displayMenu(self): + def displayMenu( self ): self.startBanner() - table = TextTable(3) - for i, topic in enumerate(self.subtopics): - table.addRow(i, topic[0], topic[1]) + table = TextTable( 3 ) + for i, topic in enumerate( self.subtopics ): + table.addRow( i, topic[ 0 ], topic[ 1 ] ) - table.addRow(i + 1, "exit", "") + table.addRow( i + 1, "exit", "" ) table.printTable() import ats - if ats.tests.AtsTest.getOptions().get("testmode"): + if ats.tests.AtsTest.getOptions().get( "testmode" ): return while True: - logger.info("Enter a topic: ") + logger.info( "Enter a topic: " ) sys.stdout.flush() try: line = sys.stdin.readline() except KeyboardInterrupt as e: - logger.debug(e) + logger.debug( e ) return None value = line.strip() - topic = self.findTopic(value) + topic = self.findTopic( value ) if topic: return topic try: - index = int(value) - if index >= 0 and index < len(self.subtopics): - return self.subtopics[index] - if index == 
len(self.subtopics): + index = int( value ) + if index >= 0 and index < len( self.subtopics ): + return self.subtopics[ index ] + if index == len( self.subtopics ): return None except ValueError as e: - logger.debug(e) + logger.debug( e ) - def process(self, args): + def process( self, args ): - if len(args) == 0: + if len( args ) == 0: topic = self.displayMenu() if topic is not None: - topic[2]() + topic[ 2 ]() else: - topicName = args[0] - topic = self.findTopic(topicName) + topicName = args[ 0 ] + topic = self.findTopic( topicName ) if topic: - topic[2](*args[1:]) + topic[ 2 ]( *args[ 1: ] ) else: - logger.warning(f"unknown topic: {topicName}") + logger.warning( f"unknown topic: {topicName}" ) -def removeLogDirectories(dir): +def removeLogDirectories( dir ): # look for subdirs containing 'ats.log' and 'geos_ats.config' # look for symlinks that point to such a directory - files = os.listdir(dir) + files = os.listdir( dir ) deldir = [] for f in files: - ff = os.path.join(dir, f) - if os.path.isdir(ff) and not os.path.islink(ff): + ff = os.path.join( dir, f ) + if os.path.isdir( ff ) and not os.path.islink( ff ): tests = [ - all([os.path.exists(os.path.join(ff, "ats.log")), - os.path.exists(os.path.join(ff, "geos_ats.config"))]), - f.find("TestLogs.") == 0 + all( [ + os.path.exists( os.path.join( ff, "ats.log" ) ), + os.path.exists( os.path.join( ff, "geos_ats.config" ) ) + ] ), + f.find( "TestLogs." 
) == 0 ] - if any(tests): - deldir.append(ff) - shutil.rmtree(ff) + if any( tests ): + deldir.append( ff ) + shutil.rmtree( ff ) for f in files: - ff = os.path.join(dir, f) - if os.path.islink(ff): - pointsto = os.path.realpath(ff) + ff = os.path.join( dir, f ) + if os.path.islink( ff ): + pointsto = os.path.realpath( ff ) if pointsto in deldir: - os.remove(ff) + os.remove( ff ) diff --git a/geos_ats_package/geos_ats/configuration_record.py b/geos_ats_package/geos_ats/configuration_record.py index 1d0291d..d98cc51 100644 --- a/geos_ats_package/geos_ats/configuration_record.py +++ b/geos_ats_package/geos_ats/configuration_record.py @@ -10,12 +10,12 @@ ################################################################################ # Get the active logger instance -logger = logging.getLogger('geos_ats') +logger = logging.getLogger( 'geos_ats' ) -class ConfigItem(object): +class ConfigItem( object ): - def __init__(self, name, type, default, doc, public): + def __init__( self, name, type, default, doc, public ): self.name = name self.type = type self.default = default @@ -24,67 +24,67 @@ def __init__(self, name, type, default, doc, public): self.public = public -class Config(object): +class Config( object ): - def __init__(self): - self.__dict__["_items"] = {} + def __init__( self ): + self.__dict__[ "_items" ] = {} - def set(self, name, value): + def set( self, name, value ): # error checking - item = self._items[name] + item = self._items[ name ] try: if item.type == str: - value = item.type(value) + value = item.type( value ) else: - if isinstance(value, str): - value = item.type(eval(value)) + if isinstance( value, str ): + value = item.type( eval( value ) ) else: - value = item.type(value) + value = item.type( value ) except ValueError: - Error("Attempted to set config.%s (which is %s) with %s" % (name, str(item.type), str(value))) + Error( "Attempted to set config.%s (which is %s) with %s" % ( name, str( item.type ), str( value ) ) ) - item.value = 
item.type(value) + item.value = item.type( value ) - def copy_values(self, target): - logger.debug("Copying command line options to config:") - target_dict = vars(target) + def copy_values( self, target ): + logger.debug( "Copying command line options to config:" ) + target_dict = vars( target ) for k in self._items.keys(): if k in target_dict: - logger.debug(f" {k}: {target_dict[k]}") - self.set(k, target_dict[k]) + logger.debug( f" {k}: {target_dict[k]}" ) + self.set( k, target_dict[ k ] ) - def get(self, name): + def get( self, name ): # error checking - return self._items[name].value + return self._items[ name ].value - def add(self, name, type, default, doc, public=True): - item = ConfigItem(name, type, default, doc, public) - self._items[item.name] = item + def add( self, name, type, default, doc, public=True ): + item = ConfigItem( name, type, default, doc, public ) + self._items[ item.name ] = item - def checkname(self, name): + def checkname( self, name ): if name not in self.__dict__: - matches = difflib.get_close_matches(name, self._items.keys()) - if len(matches) == 0: - Error("Unknown config name: %s. " - "See 'geos_ats -i config' for the complete list." % (name)) + matches = difflib.get_close_matches( name, self._items.keys() ) + if len( matches ) == 0: + Error( "Unknown config name: %s. " + "See 'geos_ats -i config' for the complete list." % ( name ) ) else: - Error("Unknown config name: %s. " - "Perhaps you meant '%s'. " - "See 'geos_ats -i config' for the complete list." % (name, matches[0])) + Error( "Unknown config name: %s. " + "Perhaps you meant '%s'. " + "See 'geos_ats -i config' for the complete list." 
% ( name, matches[ 0 ] ) ) - def __setattr__(self, name, value): + def __setattr__( self, name, value ): if name in self._items: - self.set(name, value) + self.set( name, value ) else: - self.checkname(name) + self.checkname( name ) - def __getattr__(self, name): + def __getattr__( self, name ): if name in self._items: - return self._items[name].value + return self._items[ name ].value else: - self.checkname(name) + self.checkname( name ) # The global config object @@ -95,178 +95,179 @@ def __getattr__(self, name): configDepth = 0 -def infoConfigShow(public, outfile=sys.stdout): - topic = InfoTopic("config show", outfile) +def infoConfigShow( public, outfile=sys.stdout ): + topic = InfoTopic( "config show", outfile ) topic.startBanner() import ats # type: ignore[import] - keys = sorted(config._items.keys()) - table = TextTable(3) + keys = sorted( config._items.keys() ) + table = TextTable( 3 ) for k in keys: - item = config._items[k] - if (public and item.public) or (not public): + item = config._items[ k ] + if ( public and item.public ) or ( not public ): if item.default == item.value: diff = " " else: diff = "*" - table.addRow(item.name, diff, item.value) + table.addRow( item.name, diff, item.value ) - table.printTable(outfile) + table.printTable( outfile ) - cf = ats.tests.AtsTest.getOptions().get("configFile") - outfile.write(f"\nConfig file: {cf}") + cf = ats.tests.AtsTest.getOptions().get( "configFile" ) + outfile.write( f"\nConfig file: {cf}" ) - configOverride = ats.tests.AtsTest.getOptions().get("configOverride", {}) + configOverride = ats.tests.AtsTest.getOptions().get( "configOverride", {} ) if configOverride: - outfile.write("\nCommand line overrides:") - table = TextTable(1) + outfile.write( "\nCommand line overrides:" ) + table = TextTable( 1 ) for key, value in configOverride.items(): - table.addRow(key) - table.printTable(outfile) + table.addRow( key ) + table.printTable( outfile ) topic.endBanner() -def infoConfigDocumentation(public): +def 
infoConfigDocumentation( public ): - topic = InfoTopic("config doc") + topic = InfoTopic( "config doc" ) topic.startBanner() - keys = sorted(config._items.keys()) - table = TextTable(4) - table.addRow("[NAME]", "[TYPE]", "[DEFAULT]", "[DOC]") + keys = sorted( config._items.keys() ) + table = TextTable( 4 ) + table.addRow( "[NAME]", "[TYPE]", "[DEFAULT]", "[DOC]" ) for k in keys: - item = config._items[k] - if (public and item.public) or (not public): - table.addRow(item.name, item.type.__name__, item.default, item.doc) + item = config._items[ k ] + if ( public and item.public ) or ( not public ): + table.addRow( item.name, item.type.__name__, item.default, item.doc ) - table.colmax[2] = 20 + table.colmax[ 2 ] = 20 table.printTable() topic.endBanner() -def infoConfig(*args): +def infoConfig( *args ): - menu = InfoTopic("config") - menu.addTopic("show", "Show all the config options", lambda *x: infoConfigShow(True)) - menu.addTopic("doc", "Documentation for the config options", lambda *x: infoConfigDocumentation(True)) - menu.addTopic("showall", "Show all the config options (including the internal options)", - lambda: infoConfigShow(False)) - menu.addTopic("docall", "Documentation for the config options (including the internal options)", - lambda: infoConfigDocumentation(False)) - menu.process(args) + menu = InfoTopic( "config" ) + menu.addTopic( "show", "Show all the config options", lambda *x: infoConfigShow( True ) ) + menu.addTopic( "doc", "Documentation for the config options", lambda *x: infoConfigDocumentation( True ) ) + menu.addTopic( "showall", "Show all the config options (including the internal options)", + lambda: infoConfigShow( False ) ) + menu.addTopic( "docall", "Documentation for the config options (including the internal options)", + lambda: infoConfigDocumentation( False ) ) + menu.process( args ) -def initializeConfig(configFile, configOverride, options): +def initializeConfig( configFile, configOverride, options ): # determine the directory 
where geos_ats is located. Used to find # location of other programs. - geos_atsdir = os.path.realpath(os.path.dirname(__file__)) + geos_atsdir = os.path.realpath( os.path.dirname( __file__ ) ) # configfile - config.add("testbaseline_directory", str, "", "Base directory that contains all the baselines") + config.add( "testbaseline_directory", str, "", "Base directory that contains all the baselines" ) - config.add("geos_bin_dir", str, "", "Directory that contains 'geos' and related executables.") + config.add( "geos_bin_dir", str, "", "Directory that contains 'geos' and related executables." ) - config.add("userscript_path", str, "", - "Directory that contains scripts for testing, searched after test directory and executable_path.") + config.add( "userscript_path", str, "", + "Directory that contains scripts for testing, searched after test directory and executable_path." ) - config.add("clean_on_pass", bool, False, "If True, then after a TestCase passes, " - "all temporary files are removed.") + config.add( "clean_on_pass", bool, False, "If True, then after a TestCase passes, " + "all temporary files are removed." 
) # geos options - config.add("geos_default_args", str, "-i", - "A string containing arguments that will always appear on the geos commandline") + config.add( "geos_default_args", str, "-i", + "A string containing arguments that will always appear on the geos commandline" ) # reporting - config.add("report_html", bool, True, "True if HTML formatted results will be generated with the report action") - config.add("report_html_file", str, "test_results.html", "Location to write the html report") - config.add("report_html_periodic", bool, True, "True to update the html file during the periodic reports") - config.add("browser_command", str, "firefox -no-remote", "Command to use to launch a browser to view html results") - config.add("browser", bool, False, "If True, then launch the browser_command to view the report_html_file") - config.add("report_doc_dir", str, os.path.normpath(os.path.join(geos_atsdir, "..", "doc")), - "Location to the test doc directory (used with html reports)") - config.add("report_doc_link", bool, True, "Link against docgen (used with html reports)") - config.add("report_doc_remake", bool, False, - "Remake test documentation, even if it already exists (used with html reports)") + config.add( "report_html", bool, True, "True if HTML formatted results will be generated with the report action" ) + config.add( "report_html_file", str, "test_results.html", "Location to write the html report" ) + config.add( "report_html_periodic", bool, True, "True to update the html file during the periodic reports" ) + config.add( "browser_command", str, "firefox -no-remote", + "Command to use to launch a browser to view html results" ) + config.add( "browser", bool, False, "If True, then launch the browser_command to view the report_html_file" ) + config.add( "report_doc_dir", str, os.path.normpath( os.path.join( geos_atsdir, "..", "doc" ) ), + "Location to the test doc directory (used with html reports)" ) + config.add( "report_doc_link", bool, True, "Link against 
docgen (used with html reports)" ) + config.add( "report_doc_remake", bool, False, + "Remake test documentation, even if it already exists (used with html reports)" ) - config.add("report_ini", bool, True, "True if you want ini results to be generated with the report action") - config.add("report_ini_file", str, "test_results.ini", "Location to write the ini report") + config.add( "report_ini", bool, True, "True if you want ini results to be generated with the report action" ) + config.add( "report_ini_file", str, "test_results.ini", "Location to write the ini report" ) - config.add("report_notations", type([]), [], "Lines of text that are inserted into the reports.") + config.add( "report_notations", type( [] ), [], "Lines of text that are inserted into the reports." ) - config.add("report_notbuilt_regexp", str, "(not built into this version)", - "Regular expression that must appear in output to indicate that feature is not built.") + config.add( "report_notbuilt_regexp", str, "(not built into this version)", + "Regular expression that must appear in output to indicate that feature is not built." ) - config.add("checkmessages_always_ignore_regexp", type([]), ["not available in this version"], - "Regular expression to ignore in all checkmessages steps.") + config.add( "checkmessages_always_ignore_regexp", type( [] ), [ "not available in this version" ], + "Regular expression to ignore in all checkmessages steps." ) - config.add("checkmessages_never_ignore_regexp", type([]), ["not yet implemented"], - "Regular expression to not ignore in all checkmessages steps.") + config.add( "checkmessages_never_ignore_regexp", type( [] ), [ "not yet implemented" ], + "Regular expression to not ignore in all checkmessages steps." 
) # timing and priority - config.add("priority", str, "equal", "Method of prioritization of tests: [\"equal\", \"processors\",\"timing\"]") - config.add("timing_file", str, "timing.txt", "Location of timing file") + config.add( "priority", str, "equal", "Method of prioritization of tests: [\"equal\", \"processors\",\"timing\"]" ) + config.add( "timing_file", str, "timing.txt", "Location of timing file" ) # batch - config.add("batch_dryrun", bool, False, - "If true, the batch jobs will not be submitted, but the batch scripts will be created") - config.add("batch_interactive", bool, False, "If true, the batch jobs will be treated as interactive jobs") - config.add("batch_bank", str, "", "The name of the bank to use") - config.add("batch_ppn", int, 0, "Number of processors per node") - config.add("batch_partition", str, "", "the batch partition, if not specified the default will be used.") - config.add("batch_queue", str, "pbatch", "the batch queue.") - config.add("batch_header", type([]), [], "Additional lines to add to the batch header") + config.add( "batch_dryrun", bool, False, + "If true, the batch jobs will not be submitted, but the batch scripts will be created" ) + config.add( "batch_interactive", bool, False, "If true, the batch jobs will be treated as interactive jobs" ) + config.add( "batch_bank", str, "", "The name of the bank to use" ) + config.add( "batch_ppn", int, 0, "Number of processors per node" ) + config.add( "batch_partition", str, "", "the batch partition, if not specified the default will be used." ) + config.add( "batch_queue", str, "pbatch", "the batch queue." 
) + config.add( "batch_header", type( [] ), [], "Additional lines to add to the batch header" ) # retry - config.add("max_retry", int, 2, "Maximum number of times to retry failed runs.") - config.add("retry_err_regexp", str, - "(launch failed|Failure in initializing endpoint|channel initialization failed)", - "Regular expression that must appear in error log in order to retry.") + config.add( "max_retry", int, 2, "Maximum number of times to retry failed runs." ) + config.add( "retry_err_regexp", str, + "(launch failed|Failure in initializing endpoint|channel initialization failed)", + "Regular expression that must appear in error log in order to retry." ) # timeout - config.add("default_timelimit", str, "30m", - "This sets a default timelimit for all test steps which do not explicitly set a timelimit.") - config.add("override_timelimit", bool, False, - "If true, the value used for the default time limit will override the time limit for each test step.") + config.add( "default_timelimit", str, "30m", + "This sets a default timelimit for all test steps which do not explicitly set a timelimit." ) + config.add( "override_timelimit", bool, False, + "If true, the value used for the default time limit will override the time limit for each test step." ) # Decomposition Multiplication config.add( "decomp_factor", int, 1, "This sets the multiplication factor to be applied to the decomposition and number of procs of all eligible tests." ) - config.add("override_np", int, 0, "If non-zero, maximum number of processors to use for each test step.") + config.add( "override_np", int, 0, "If non-zero, maximum number of processors to use for each test step." 
) # global environment variables - config.add("environment", dict, {}, "Additional environment variables to use during testing") + config.add( "environment", dict, {}, "Additional environment variables to use during testing" ) # General check config - for check in ("restartcheck", ): + for check in ( "restartcheck", ): config.add( "%s_enabled" % check, bool, True, "If True, this check has the possibility of running, " "but might not run depending on the '--check' command line option. " - "If False, this check will never be run.") + "If False, this check will never be run." ) - for check in ("hdf5_dif.py", ): - config.add("%s_script" % check, - str, - os.path.join(geos_atsdir, "helpers/%s.py" % check), - "Location to the %s frontend script." % check, - public=False) + for check in ( "hdf5_dif.py", ): + config.add( "%s_script" % check, + str, + os.path.join( geos_atsdir, "helpers/%s.py" % check ), + "Location to the %s frontend script." % check, + public=False ) # Checks: Restartcheck - config.add("restart_skip_missing", bool, False, "Determines whether new/missing fields are ignored") - config.add("restart_exclude_pattern", list, [], "A list of field names to ignore in restart files") + config.add( "restart_skip_missing", bool, False, "Determines whether new/missing fields are ignored" ) + config.add( "restart_exclude_pattern", list, [], "A list of field names to ignore in restart files" ) # Checks: Curvecheck - config.add("curvecheck_enabled", bool, True, "Determines whether curvecheck steps are run.") - config.add("curvecheck_tapestry_mode", bool, False, - "Provide temporary backwards compatibility for nighty and weekly suites until they are using geos_ats") - config.add("curvecheck_absolute", float, 1e-5, "absolute tolerance") - config.add("curvecheck_relative", float, 1e-5, "relative tolerance") + config.add( "curvecheck_enabled", bool, True, "Determines whether curvecheck steps are run." 
) + config.add( "curvecheck_tapestry_mode", bool, False, + "Provide temporary backwards compatibility for nighty and weekly suites until they are using geos_ats" ) + config.add( "curvecheck_absolute", float, 1e-5, "absolute tolerance" ) + config.add( "curvecheck_relative", float, 1e-5, "relative tolerance" ) config.add( "curvecheck_failtype", str, "composite", "String that represents failure check. 'composite or relative' will fail curvecheck if either the composite error or relative error is too high. 'absolute and slope' will fail only if both the absolute error check and the slope error check fail. The default value is 'composite'." @@ -279,74 +280,74 @@ def initializeConfig(configFile, configOverride, options): "curvecheck_delete_temps", bool, True, "Curvecheck generates a number of temporary data files that are used to create the images for the html file. If this parameter is true, curvecheck will delete these temporary files. By default, the parameter is true." ) - config.add("gnuplot_executable", str, os.path.join("/usr", "bin", "gnuplot"), "Location to gnuplot") + config.add( "gnuplot_executable", str, os.path.join( "/usr", "bin", "gnuplot" ), "Location to gnuplot" ) # Rebaseline: config.add( "rebaseline_undo", bool, False, "If True, and the action is set to 'rebaseline'," - " this option will undo (revert) a previous rebaseline.") - config.add("rebaseline_ask", bool, True, "If True, the rebaseline will not occur until the user has anwered an" - " 'are you sure?' question") + " this option will undo (revert) a previous rebaseline." ) + config.add( "rebaseline_ask", bool, True, "If True, the rebaseline will not occur until the user has anwered an" + " 'are you sure?' 
question" ) # test modifier - config.add("testmodifier", str, "", "Name of a test modifier to apply") + config.add( "testmodifier", str, "", "Name of a test modifier to apply" ) # filters - config.add("filter_maxprocessors", int, -1, "If not -1, Run only those tests where the number of" - " processors is less than or equal to this value") + config.add( "filter_maxprocessors", int, -1, "If not -1, Run only those tests where the number of" + " processors is less than or equal to this value" ) # machines - config.add("machine_options", list, [], "Arguments to pass to the machine module") + config.add( "machine_options", list, [], "Arguments to pass to the machine module" ) - config.add("script_launch", int, 0, "Whether to launch scripts (and other serial steps) on compute nodes") - config.add("openmpi_install", str, "", "Location to the openmpi installation") - config.add("openmpi_maxprocs", int, 0, "Number of maximum processors openmpi") - config.add("openmpi_procspernode", int, 1, "Number of processors per node for openmpi") + config.add( "script_launch", int, 0, "Whether to launch scripts (and other serial steps) on compute nodes" ) + config.add( "openmpi_install", str, "", "Location to the openmpi installation" ) + config.add( "openmpi_maxprocs", int, 0, "Number of maximum processors openmpi" ) + config.add( "openmpi_procspernode", int, 1, "Number of processors per node for openmpi" ) config.add( "openmpi_precommand", str, "", "A string that will be" " prepended to each command. If the substring '%(np)s' is present," " it will be replaced by then number of processors required for the" " test. If the substring '%(J)s' is present, it will be replaced by" - " the unique name of the test.") - config.add("openmpi_args", str, "", "A string of arguments to mpirun") + " the unique name of the test." 
) + config.add( "openmpi_args", str, "", "A string of arguments to mpirun" ) config.add( "openmpi_terminate", str, "", "A string that will be" " called upon abnormal termination. If the substring '%(J)s' is present," - " it will be replaced by the unique name of the test.") + " it will be replaced by the unique name of the test." ) - config.add("windows_mpiexe", str, "", "Location to mpiexe") - config.add("windows_nompi", bool, False, "Run executables on nompi processor") - config.add("windows_oversubscribe", int, 1, - "Multiplier to number of processors to allow oversubscription of processors") + config.add( "windows_mpiexe", str, "", "Location to mpiexe" ) + config.add( "windows_nompi", bool, False, "Run executables on nompi processor" ) + config.add( "windows_oversubscribe", int, 1, + "Multiplier to number of processors to allow oversubscription of processors" ) # populate the config with overrides from the command line for key, value in configOverride.items(): try: - setattr(config, key, value) + setattr( config, key, value ) except RuntimeError as e: # this allows for the testconfig file to define it's own # config options that can be overridden at the command line. 
- logger.debug(e) + logger.debug( e ) # Setup the config dict if configFile: - logger.warning("Config file override currently not available") + logger.warning( "Config file override currently not available" ) ## override the config file from the command line for key, value in configOverride.items(): - setattr(config, key, value) + setattr( config, key, value ) # validate prioritization scheme - if config.priority.lower().startswith("eq"): + if config.priority.lower().startswith( "eq" ): config.priority = "equal" - elif config.priority.lower().startswith("proc"): + elif config.priority.lower().startswith( "proc" ): config.priority = "processors" - elif config.priority.lower().startswith("tim"): + elif config.priority.lower().startswith( "tim" ): config.priority = "timing" else: - Error("priority '%s' is not valid" % config.priority) + Error( "priority '%s' is not valid" % config.priority ) ## environment variables for k, v in config.environment.items(): - os.environ[k] = v + os.environ[ k ] = v diff --git a/geos_ats_package/geos_ats/environment_setup.py b/geos_ats_package/geos_ats/environment_setup.py index 2864a8e..698132f 100644 --- a/geos_ats_package/geos_ats/environment_setup.py +++ b/geos_ats_package/geos_ats/environment_setup.py @@ -4,58 +4,65 @@ import argparse -def setup_ats(src_path, build_path, baseline_dir, working_dir, ats_xargs, ats_machine, ats_machine_dir): - bin_dir = os.path.join(build_path, "bin") - geos_ats_fname = os.path.join(bin_dir, "run_geos_ats") - test_path = os.path.join(build_path, "integratedTests") - link_path = os.path.join(test_path, "integratedTests") - run_script_fname = os.path.join(test_path, "geos_ats.sh") - log_dir = os.path.join(test_path, "TestResults") - baseline_dir = os.path.abspath(baseline_dir) - working_dir = os.path.abspath(working_dir) - ats_main_file = os.path.abspath(os.path.join(src_path, 'inputFiles', 'main.ats')) +def setup_ats( src_path, build_path, baseline_dir, working_dir, ats_xargs, ats_machine, 
ats_machine_dir ): + bin_dir = os.path.join( build_path, "bin" ) + geos_ats_fname = os.path.join( bin_dir, "run_geos_ats" ) + test_path = os.path.join( build_path, "integratedTests" ) + link_path = os.path.join( test_path, "integratedTests" ) + run_script_fname = os.path.join( test_path, "geos_ats.sh" ) + log_dir = os.path.join( test_path, "TestResults" ) + baseline_dir = os.path.abspath( baseline_dir ) + working_dir = os.path.abspath( working_dir ) + ats_main_file = os.path.abspath( os.path.join( src_path, 'inputFiles', 'main.ats' ) ) # Create a symbolic link to working directory - os.makedirs(working_dir, exist_ok=True) - if os.path.islink(link_path): - print('integratedTests symlink already exists') + os.makedirs( working_dir, exist_ok=True ) + if os.path.islink( link_path ): + print( 'integratedTests symlink already exists' ) else: - os.symlink(working_dir, link_path) + os.symlink( working_dir, link_path ) # Build extra arguments that should be passed to ATS - joined_args = [' '.join(x) for x in ats_xargs] - ats_args = ' '.join([f'--ats {x}' for x in joined_args]) + joined_args = [ ' '.join( x ) for x in ats_xargs ] + ats_args = ' '.join( [ f'--ats {x}' for x in joined_args ] ) if ats_machine: ats_args += f' --machine {ats_machine}' if ats_machine_dir: ats_args += f' --machine-dir {ats_machine_dir}' # Write the bash script to run ats. 
- with open(run_script_fname, "w") as g: - g.write("#!/bin/bash\n") - g.write(f"{geos_ats_fname} {bin_dir} {ats_main_file} --workingDir {working_dir} --baselineDir {baseline_dir} --logs {log_dir} {ats_args} \"$@\"\n") + with open( run_script_fname, "w" ) as g: + g.write( "#!/bin/bash\n" ) + g.write( + f"{geos_ats_fname} {bin_dir} {ats_main_file} --workingDir {working_dir} --baselineDir {baseline_dir} --logs {log_dir} {ats_args} \"$@\"\n" + ) # Make the script executable - st = os.stat(run_script_fname) - os.chmod(run_script_fname, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) + st = os.stat( run_script_fname ) + os.chmod( run_script_fname, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH ) def main(): # Cmake may combine the final arguments into a string literal # Manually unpack those before parsing - final_arg = sys.argv.pop(-1) - sys.argv.extend(final_arg.split()) - - parser = argparse.ArgumentParser(description="Setup ATS script") - parser.add_argument("src_path", type=str, help="GEOS src path") - parser.add_argument("build_path", type=str, help="GEOS build path") - parser.add_argument("baseline_dir", type=str, help="GEOS test baseline root directory") - parser.add_argument("working_dir", type=str, help="GEOS test working root directory") - parser.add_argument("--ats", nargs='+', default=[], action="append", help="Arguments that should be passed to ats") - parser.add_argument("--machine", type=str, default='', help="ATS machine name") - parser.add_argument("--machine-dir", type=str, default='', help="ATS machine directory") + final_arg = sys.argv.pop( -1 ) + sys.argv.extend( final_arg.split() ) + + parser = argparse.ArgumentParser( description="Setup ATS script" ) + parser.add_argument( "src_path", type=str, help="GEOS src path" ) + parser.add_argument( "build_path", type=str, help="GEOS build path" ) + parser.add_argument( "baseline_dir", type=str, help="GEOS test baseline root directory" ) + parser.add_argument( "working_dir", type=str, 
help="GEOS test working root directory" ) + parser.add_argument( "--ats", + nargs='+', + default=[], + action="append", + help="Arguments that should be passed to ats" ) + parser.add_argument( "--machine", type=str, default='', help="ATS machine name" ) + parser.add_argument( "--machine-dir", type=str, default='', help="ATS machine directory" ) options, unkown_args = parser.parse_known_args() - setup_ats(options.src_path, options.build_path, options.baseline_dir, options.working_dir, options.ats, options.machine, options.machine_dir) + setup_ats( options.src_path, options.build_path, options.baseline_dir, options.working_dir, options.ats, + options.machine, options.machine_dir ) if __name__ == '__main__': diff --git a/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py b/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py index e2bb4e7..488aba1 100644 --- a/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py +++ b/geos_ats_package/geos_ats/machines/batchGeosatsMoab.py @@ -7,30 +7,30 @@ import logging debug = configuration.debug -logger = logging.getLogger('geos_ats') +logger = logging.getLogger( 'geos_ats' ) -class BatchGeosatsMoab(BatchMachine): +class BatchGeosatsMoab( BatchMachine ): """The batch machine """ - def init(self): + def init( self ): - super(BatchGeosatsMoab, self).init() + super( BatchGeosatsMoab, self ).init() if "SLURM_NNODES" in os.environ.keys(): - self.ppn = int(os.getenv("SLURM_TASKS_PER_NODE", "1").split("(")[0]) + self.ppn = int( os.getenv( "SLURM_TASKS_PER_NODE", "1" ).split( "(" )[ 0 ] ) elif "SLURM_JOB_NUM_NODES" in os.environ.keys(): - self.ppn = int(os.getenv("SLURM_JOB_CPUS_PER_NODE", "1").split("(")[0]) + self.ppn = int( os.getenv( "SLURM_JOB_CPUS_PER_NODE", "1" ).split( "(" )[ 0 ] ) else: self.ppn = 0 self.numberTestsRunningMax = 2048 - def canRun(self, test): + def canRun( self, test ): return '' - def load(self, testlist): + def load( self, testlist ): """Receive a list of tests to possibly run. 
Submit the set of tests to batch. """ @@ -41,20 +41,20 @@ def load(self, testlist): # for each test group make an msub file if t.groupSerialNumber == 1: - testCase = getattr(t, "geos_atsTestCase", None) + testCase = getattr( t, "geos_atsTestCase", None ) if testCase: - batchFilename = os.path.join(testCase.dirnamefull, "batch_%s.msub" % testCase.name) - self.writeSubmitScript(batchFilename, testCase) - self.jobid = self.submitBatchScript(testCase.name, batchFilename) + batchFilename = os.path.join( testCase.dirnamefull, "batch_%s.msub" % testCase.name ) + self.writeSubmitScript( batchFilename, testCase ) + self.jobid = self.submitBatchScript( testCase.name, batchFilename ) - def writeSubmitScript(self, batchFilename, testCase): + def writeSubmitScript( self, batchFilename, testCase ): - fc = open(batchFilename, "w") + fc = open( batchFilename, "w" ) batch = testCase.batch # get references to the options and configuration options = AtsTest.getOptions() - config = options.get("config", None) + config = options.get( "config", None ) # ppn # 1. first check batch object @@ -69,10 +69,10 @@ def writeSubmitScript(self, batchFilename, testCase): ppn = self.ppn if ppn == 0: - raise RuntimeError(""" + raise RuntimeError( """ Unable to find the number of processors per node in BatchGeosatsMoab. Try setting batch_ppn= on the - command line.""") + command line.""" ) # Specifies parallel Lustre file system. 
gresLine = "" @@ -81,7 +81,7 @@ def writeSubmitScript(self, batchFilename, testCase): # determine the max number of processors in this job maxprocs = testCase.findMaxNumberOfProcessors() - minNodes = maxprocs / ppn + (maxprocs % ppn != 0) + minNodes = maxprocs / ppn + ( maxprocs % ppn != 0 ) # MSUB options msub_str = '#!/bin/csh' @@ -114,20 +114,20 @@ def writeSubmitScript(self, batchFilename, testCase): msub_str += f"\n\ncd {testCase.dirnamefull}" # pull out options to construct the command line - action = options.get("action") - checkoption = options.get("checkoption") - configFile = options.get("configFile") - configOverride = options.get("configOverride") - atsFlags = options.get("atsFlags") - geos_atsPath = options.get("geos_atsPath") - machine = options.get("machine") + action = options.get( "action" ) + checkoption = options.get( "checkoption" ) + configFile = options.get( "configFile" ) + configOverride = options.get( "configOverride" ) + atsFlags = options.get( "atsFlags" ) + geos_atsPath = options.get( "geos_atsPath" ) + machine = options.get( "machine" ) # construct the command line msub_str += f'\n{geos_atsPath} -a {action} -c {checkoption}' msub_str += f' -f {configFile} -N {minNodes:d} --machine={machine}' for key, value in configOverride.items(): - if key.startswith("batch"): + if key.startswith( "batch" ): continue msub_str += f' {key}="{value}"' @@ -138,26 +138,26 @@ def writeSubmitScript(self, batchFilename, testCase): msub_str += f" batch_interactive=True {testCase.name}" # Write and close the file - fc.write(msub_str) + fc.write( msub_str ) fc.close() - def submitBatchScript(self, testname, batchFilename): + def submitBatchScript( self, testname, batchFilename ): options = AtsTest.getOptions() - config = options.get("config", None) + config = options.get( "config", None ) if config and config.batch_dryrun: return - p = subprocess.Popen(["msub", batchFilename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - out = p.communicate()[0] + p = 
subprocess.Popen( [ "msub", batchFilename ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) + out = p.communicate()[ 0 ] if p.returncode: - raise RuntimeError(f"Error submitting {testname} to batch: {out}") + raise RuntimeError( f"Error submitting {testname} to batch: {out}" ) try: - jobid = int(out) - logger.info(f" Submitting {testname}, jobid = {jobid:d}") + jobid = int( out ) + logger.info( f" Submitting {testname}, jobid = {jobid:d}" ) except: err = f"Error submitting {testname} to batch: {out}" - logger.error(err) - raise RuntimeError(err) + logger.error( err ) + raise RuntimeError( err ) diff --git a/geos_ats_package/geos_ats/main.py b/geos_ats_package/geos_ats/main.py index f98c372..5466012 100644 --- a/geos_ats_package/geos_ats/main.py +++ b/geos_ats_package/geos_ats/main.py @@ -7,12 +7,12 @@ import logging from geos_ats import command_line_parsers, test_builder -test_actions = ("run", "rerun", "check", "continue") -report_actions = ("run", "rerun", "report", "continue") +test_actions = ( "run", "rerun", "check", "continue" ) +report_actions = ( "run", "rerun", "report", "continue" ) # Setup the logger -logging.basicConfig(level=logging.DEBUG, format='(%(asctime)s %(module)s:%(lineno)d) %(message)s') -logger = logging.getLogger('geos_ats') +logging.basicConfig( level=logging.DEBUG, format='(%(asctime)s %(module)s:%(lineno)d) %(message)s' ) +logger = logging.getLogger( 'geos_ats' ) # Job records current_subproc = None @@ -20,100 +20,100 @@ geos_atsStartTime = 0 -def build_ats_arguments(options, originalargv, config): +def build_ats_arguments( options, originalargv, config ): # construct the argv to pass to the ATS: atsargv = [] - atsargv.append(originalargv[0]) - atsargv.append("--showGroupStartOnly") - atsargv.append("--logs=%s" % options.logs) + atsargv.append( originalargv[ 0 ] ) + atsargv.append( "--showGroupStartOnly" ) + atsargv.append( "--logs=%s" % options.logs ) if config.batch_interactive: - atsargv.append("--allInteractive") - 
atsargv.extend(config.machine_options) + atsargv.append( "--allInteractive" ) + atsargv.extend( config.machine_options ) for x in options.ats: # Add the appropriate argument indicators back based on their length - if len(x[0]) == 1: - x[0] = '-' + x[0] + if len( x[ 0 ] ) == 1: + x[ 0 ] = '-' + x[ 0 ] else: - x[0] = '--' + x[0] - atsargv.extend(x) + x[ 0 ] = '--' + x[ 0 ] + atsargv.extend( x ) - for f in os.environ.get('ATS_FILTER', '').split(','): - atsargv.extend(['-f', f]) + for f in os.environ.get( 'ATS_FILTER', '' ).split( ',' ): + atsargv.extend( [ '-f', f ] ) - atsargv.append(options.ats_target) + atsargv.append( options.ats_target ) sys.argv = atsargv -def write_log_dir_summary(logdir, originalargv): +def write_log_dir_summary( logdir, originalargv ): from geos_ats import configuration_record - with open(os.path.join(logdir, "geos_ats.config"), "w") as logconfig: - tmp = " ".join(originalargv[1:]) - logconfig.write(f'Run with: "{tmp}"\n') - configuration_record.infoConfigShow(True, logconfig) + with open( os.path.join( logdir, "geos_ats.config" ), "w" ) as logconfig: + tmp = " ".join( originalargv[ 1: ] ) + logconfig.write( f'Run with: "{tmp}"\n' ) + configuration_record.infoConfigShow( True, logconfig ) -def handleShutdown(signum, frame): +def handleShutdown( signum, frame ): if current_jobid is not None: term = "scancel -n %s" % current_jobid - subprocess.call(term, shell=True) - sys.exit(1) + subprocess.call( term, shell=True ) + sys.exit( 1 ) -def handle_salloc_relaunch(options, originalargv, configOverride): +def handle_salloc_relaunch( options, originalargv, configOverride ): tests = [ options.action in test_actions, options.salloc, options.machine - in ("SlurmProcessorScheduled", "GeosAtsSlurmProcessorScheduled"), "SLURM_JOB_ID" not in os.environ + in ( "SlurmProcessorScheduled", "GeosAtsSlurmProcessorScheduled" ), "SLURM_JOB_ID" not in os.environ ] - if all(tests): + if all( tests ): if options.sallocOptions != "": - sallocCommand = ["salloc"] + 
options.sallocOptions.split(" ") + sallocCommand = [ "salloc" ] + options.sallocOptions.split( " " ) else: - sallocCommand = ["salloc", "-ppdebug", "--exclusive", "-N", "%d" % options.numNodes] + sallocCommand = [ "salloc", "-ppdebug", "--exclusive", "-N", "%d" % options.numNodes ] if "testmodifier" in configOverride: - if configOverride["testmodifier"] == "memcheck": - p = subprocess.Popen(['sinfo', '-o', '%l', '-h', '-ppdebug'], stdout=subprocess.PIPE) + if configOverride[ "testmodifier" ] == "memcheck": + p = subprocess.Popen( [ 'sinfo', '-o', '%l', '-h', '-ppdebug' ], stdout=subprocess.PIPE ) out, err = p.communicate() - tarray = out.split(":") + tarray = out.split( ":" ) seconds = tarray.pop() minutes = tarray.pop() hours = 0 days = 0 - if len(tarray) > 0: + if len( tarray ) > 0: hours = tarray.pop() try: - days, hours = hours.split('-') + days, hours = hours.split( '-' ) except ValueError as e: - logger.debug(e) - limit = min(360, (24 * int(days) + int(hours)) * 60 + int(minutes)) - sallocCommand.extend(["-t", "%d" % limit]) + logger.debug( e ) + limit = min( 360, ( 24 * int( days ) + int( hours ) ) * 60 + int( minutes ) ) + sallocCommand.extend( [ "-t", "%d" % limit ] ) # generate a "unique" name for the salloc job so we can remove it later - timeNow = time.strftime('%H%M%S', time.localtime()) + timeNow = time.strftime( '%H%M%S', time.localtime() ) current_jobid = "geos_ats_%s" % timeNow # add the name to the arguments (this will override any previous name specification) - sallocCommand.extend(["-J", "%s" % current_jobid]) + sallocCommand.extend( [ "-J", "%s" % current_jobid ] ) # register our signal handler - signal.signal(signal.SIGTERM, handleShutdown) + signal.signal( signal.SIGTERM, handleShutdown ) command = sallocCommand # omit --workingDir on relaunch, as we have already changed directories - relaunchargv = [x for x in originalargv if not x.startswith("--workingDir")] + relaunchargv = [ x for x in originalargv if not x.startswith( "--workingDir" ) ] 
command += relaunchargv - command += ["--logs=%s" % options.logs] - p = subprocess.Popen(command) + command += [ "--logs=%s" % options.logs ] + p = subprocess.Popen( command ) p.wait() - sys.exit(p.returncode) + sys.exit( p.returncode ) def getLogDirBaseName(): return "TestLogs" -def create_log_directory(options): +def create_log_directory( options ): """ When the action will run tests (e.g. "run", "rerun", "check", "continue", then the LogDir is numbered, and saved. When the action does not run @@ -126,118 +126,119 @@ def create_log_directory(options): basename = getLogDirBaseName() index = 1 while True: - options.logs = "%s.%03d" % (basename, index) - if not os.path.exists(options.logs): + options.logs = "%s.%03d" % ( basename, index ) + if not os.path.exists( options.logs ): break index += 1 # make the options.logs - os.mkdir(options.logs) + os.mkdir( options.logs ) # make symlink try: - if os.path.exists(basename): - if os.path.islink(basename): - os.remove(basename) + if os.path.exists( basename ): + if os.path.islink( basename ): + os.remove( basename ) else: - logger.error(f"unable to replace {basename} with a symlink to {options.logs}") + logger.error( f"unable to replace {basename} with a symlink to {options.logs}" ) - if not os.path.exists(basename): - os.symlink(options.logs, basename) + if not os.path.exists( basename ): + os.symlink( options.logs, basename ) except: - logger.error("unable to name a symlink to to logdir") + logger.error( "unable to name a symlink to to logdir" ) else: if options.action in test_actions: - options.logs = "%s.%s" % (getLogDirBaseName(), options.action) + options.logs = "%s.%s" % ( getLogDirBaseName(), options.action ) elif options.info: - options.logs = "%s.info" % (getLogDirBaseName()) + options.logs = "%s.info" % ( getLogDirBaseName() ) else: - if not os.path.join(options.logs): - os.mkdir(options.logs) + if not os.path.join( options.logs ): + os.mkdir( options.logs ) -def check_timing_file(options, config): - if 
options.action in ["run", "rerun", "continue"]: +def check_timing_file( options, config ): + if options.action in [ "run", "rerun", "continue" ]: if config.timing_file: - if not os.path.isfile(config.timing_file): - logger.warning(f'Timing file does not exist {config.timing_file}') + if not os.path.isfile( config.timing_file ): + logger.warning( f'Timing file does not exist {config.timing_file}' ) return from geos_ats import configuration_record - with open(config.timing_file, "r") as filep: + with open( config.timing_file, "r" ) as filep: for line in filep: - if not line.startswith('#'): + if not line.startswith( '#' ): tokens = line.split() - configuration_record.globalTestTimings[tokens[0]] = int(tokens[1]) + configuration_record.globalTestTimings[ tokens[ 0 ] ] = int( tokens[ 1 ] ) -def infoOptions(title, options): +def infoOptions( title, options ): from geos_ats import common_utilities - topic = common_utilities.InfoTopic(title) + topic = common_utilities.InfoTopic( title ) topic.startBanner() - table = common_utilities.TextTable(2) + table = common_utilities.TextTable( 2 ) for opt, desc in options: - table.addRow(opt, desc) + table.addRow( opt, desc ) table.printTable() topic.endBanner() -def info(args): - from geos_ats import (common_utilities, configuration_record, test_steps, suite_settings, test_case, test_modifier) - - infoLabels = lambda *x: suite_settings.infoLabels(suite_settings.__file__) - infoOwners = lambda *x: suite_settings.infoOwners(suite_settings.__file__) - - menu = common_utilities.InfoTopic("geos_ats info menu") - menu.addTopic("teststep", "Reference on all the TestStep", test_steps.infoTestSteps) - menu.addTopic("testcase", "Reference on the TestCase", test_case.infoTestCase) - menu.addTopic("labels", "List of labels", infoLabels) - menu.addTopic("owners", "List of owners", infoOwners) - menu.addTopic("config", "Reference on config options", configuration_record.infoConfig) - menu.addTopic("actions", "Description of the command line 
action options",
- lambda *x: infoOptions("command line actions", command_line_parsers.action_ptions))
- menu.addTopic("checks", "Description of the command line check options",
- lambda *x: infoOptions("command line checks", command_line_parsers.check_options))
- menu.addTopic("modifiers", "List of test modifiers", test_modifier.infoTestModifier)
+def info( args ):
+ from geos_ats import ( common_utilities, configuration_record, test_steps, suite_settings, test_case,
+ test_modifier )
+
+ infoLabels = lambda *x: suite_settings.infoLabels( suite_settings.__file__ )
+ infoOwners = lambda *x: suite_settings.infoOwners( suite_settings.__file__ )
+
+ menu = common_utilities.InfoTopic( "geos_ats info menu" )
+ menu.addTopic( "teststep", "Reference on all the TestStep", test_steps.infoTestSteps )
+ menu.addTopic( "testcase", "Reference on the TestCase", test_case.infoTestCase )
+ menu.addTopic( "labels", "List of labels", infoLabels )
+ menu.addTopic( "owners", "List of owners", infoOwners )
+ menu.addTopic( "config", "Reference on config options", configuration_record.infoConfig )
+ menu.addTopic( "actions", "Description of the command line action options",
+ lambda *x: infoOptions( "command line actions", command_line_parsers.action_options ) )
+ menu.addTopic( "checks", "Description of the command line check options",
+ lambda *x: infoOptions( "command line checks", command_line_parsers.check_options ) )
+ menu.addTopic( "modifiers", "List of test modifiers", test_modifier.infoTestModifier )
 # menu.addTopic("testconfig", "Information on the testconfig.py file",
 # lambda *x: infoParagraph("testconfig", command_line_parsers.test_config_info))
- menu.process(args)
+ menu.process( args )
 
 
-def report(manager):
+def report( manager ):
 """The report action"""
- from geos_ats import (reporting, configuration_record)
+ from geos_ats import ( reporting, configuration_record )
 
 if configuration_record.config.report_html:
- reporter = reporting.ReportHTML(manager.testlist)
+ 
reporter = reporting.ReportHTML( manager.testlist ) reporter.report() if configuration_record.config.report_ini: - reporter = reporting.ReportIni(manager.testlist) - with open(configuration_record.config.report_ini_file, "w") as filep: - reporter.report(filep) + reporter = reporting.ReportIni( manager.testlist ) + with open( configuration_record.config.report_ini_file, "w" ) as filep: + reporter.report( filep ) -def summary(manager, alog, short=False): +def summary( manager, alog, short=False ): """Periodic summary and final summary""" - from geos_ats import (reporting, configuration_record) + from geos_ats import ( reporting, configuration_record ) - if len(manager.testlist) == 0: + if len( manager.testlist ) == 0: return if configuration_record.config.report_html and configuration_record.config.report_html_periodic: - reporter = reporting.ReportHTML(manager.testlist) - reporter.report(refresh=30) + reporter = reporting.ReportHTML( manager.testlist ) + reporter.report( refresh=30 ) -def append_geos_ats_summary(manager): +def append_geos_ats_summary( manager ): initial_summary = manager.summary - def new_summary(*xargs, **kwargs): - initial_summary(*xargs, **kwargs) - summary(manager, None) + def new_summary( *xargs, **kwargs ): + initial_summary( *xargs, **kwargs ) + summary( manager, None ) manager.summary = new_summary @@ -250,8 +251,8 @@ def main(): # --------------------------------- # Handle command line arguments # --------------------------------- - originalargv = sys.argv[:] - options = command_line_parsers.parse_command_line_arguments(originalargv) + originalargv = sys.argv[ : ] + options = command_line_parsers.parse_command_line_arguments( originalargv ) # Set logging verbosity verbosity_options = { @@ -260,24 +261,24 @@ def main(): 'warning': logging.WARNING, 'error': logging.ERROR } - logger.setLevel(verbosity_options[options.verbose]) + logger.setLevel( verbosity_options[ options.verbose ] ) # Set key environment variables before importing ats from 
geos_ats import machines search_path = '' if options.machine_dir is not None: - if os.path.isdir(options.machine_dir): + if os.path.isdir( options.machine_dir ): search_path = options.machine_dir else: - logger.error(f'Target machine dir does not exist: {options.machine_dir}') - logger.error('geos_ats will continue searching in the default path') + logger.error( f'Target machine dir does not exist: {options.machine_dir}' ) + logger.error( 'geos_ats will continue searching in the default path' ) if not search_path: - search_path = os.path.dirname(machines.__file__) - os.environ['MACHINE_DIR'] = search_path + search_path = os.path.dirname( machines.__file__ ) + os.environ[ 'MACHINE_DIR' ] = search_path if options.machine: - os.environ["MACHINE_TYPE"] = options.machine + os.environ[ "MACHINE_TYPE" ] = options.machine # --------------------------------- # Setup ATS @@ -287,14 +288,14 @@ def main(): configFile = '' # Setup paths - ats_root_dir = os.path.abspath(os.path.dirname(options.ats_target)) - os.chdir(ats_root_dir) - os.makedirs(options.workingDir, exist_ok=True) - create_log_directory(options) + ats_root_dir = os.path.abspath( os.path.dirname( options.ats_target ) ) + os.chdir( ats_root_dir ) + os.makedirs( options.workingDir, exist_ok=True ) + create_log_directory( options ) # Check the test configuration from geos_ats import configuration_record - configuration_record.initializeConfig(configFile, configOverride, options) + configuration_record.initializeConfig( configFile, configOverride, options ) config = configuration_record.config config.geos_bin_dir = options.geos_bin_dir @@ -302,24 +303,24 @@ def main(): if 'skip_missing' in r: config.restart_skip_missing = True elif 'exclude' in r: - config.restart_exclude_pattern.append(r[-1]) + config.restart_exclude_pattern.append( r[ -1 ] ) # Check the report location if options.logs: - config.report_html_file = os.path.join(options.logs, 'test_results.html') - config.report_ini_file = os.path.join(options.logs, 
'test_results.ini') + config.report_html_file = os.path.join( options.logs, 'test_results.html' ) + config.report_ini_file = os.path.join( options.logs, 'test_results.ini' ) - build_ats_arguments(options, originalargv, config) + build_ats_arguments( options, originalargv, config ) # Additional setup tasks - check_timing_file(options, config) - handle_salloc_relaunch(options, originalargv, configOverride) + check_timing_file( options, config ) + handle_salloc_relaunch( options, originalargv, configOverride ) # Print config information - logger.debug("*" * 80) + logger.debug( "*" * 80 ) for notation in config.report_notations: - logger.debug(notation) - logger.debug("*" * 80) + logger.debug( notation ) + logger.debug( "*" * 80 ) # --------------------------------- # Initialize ATS @@ -329,38 +330,38 @@ def main(): # Note: the sys.argv is read here by default import ats # type: ignore[import] ats.manager.init() - logger.debug('Copying options to the geos_ats config record file') - config.copy_values(ats.manager.machine) + logger.debug( 'Copying options to the geos_ats config record file' ) + config.copy_values( ats.manager.machine ) # Glue global values - ats.AtsTest.glue(action=options.action) - ats.AtsTest.glue(checkoption=options.check) - ats.AtsTest.glue(configFile=configFile) - ats.AtsTest.glue(configOverride=configOverride) - ats.AtsTest.glue(testmode=False) - ats.AtsTest.glue(workingDir=options.workingDir) - ats.AtsTest.glue(baselineDir=options.baselineDir) - ats.AtsTest.glue(logDir=options.logs) - ats.AtsTest.glue(atsRootDir=ats_root_dir) - ats.AtsTest.glue(atsFlags=options.ats) - ats.AtsTest.glue(atsFiles=options.ats_target) - ats.AtsTest.glue(machine=options.machine) - ats.AtsTest.glue(config=config) - if len(testcases): - ats.AtsTest.glue(testcases=testcases) + ats.AtsTest.glue( action=options.action ) + ats.AtsTest.glue( checkoption=options.check ) + ats.AtsTest.glue( configFile=configFile ) + ats.AtsTest.glue( configOverride=configOverride ) + 
ats.AtsTest.glue( testmode=False ) + ats.AtsTest.glue( workingDir=options.workingDir ) + ats.AtsTest.glue( baselineDir=options.baselineDir ) + ats.AtsTest.glue( logDir=options.logs ) + ats.AtsTest.glue( atsRootDir=ats_root_dir ) + ats.AtsTest.glue( atsFlags=options.ats ) + ats.AtsTest.glue( atsFiles=options.ats_target ) + ats.AtsTest.glue( machine=options.machine ) + ats.AtsTest.glue( config=config ) + if len( testcases ): + ats.AtsTest.glue( testcases=testcases ) else: - ats.AtsTest.glue(testcases="all") + ats.AtsTest.glue( testcases="all" ) - from geos_ats import (common_utilities, suite_settings, test_case, test_steps) + from geos_ats import ( common_utilities, suite_settings, test_case, test_steps ) # Set ats options - append_geos_ats_summary(ats.manager) + append_geos_ats_summary( ats.manager ) ats.manager.machine.naptime = 0.2 ats.log.echo = True # Logging - if options.action in ("run", "rerun", "check", "continue"): - write_log_dir_summary(options.logs, originalargv) + if options.action in ( "run", "rerun", "check", "continue" ): + write_log_dir_summary( options.logs, originalargv ) if options.action in test_actions: ats.manager.firstBanner() @@ -369,49 +370,49 @@ def main(): # Run ATS # --------------------------------- result = ats.manager.core() - if len(test_builder.test_build_failures): - tmp = ', '.join(test_builder.test_build_failures) - logger.error(f'The following ATS test failed to build: {tmp}') + if len( test_builder.test_build_failures ): + tmp = ', '.join( test_builder.test_build_failures ) + logger.error( f'The following ATS test failed to build: {tmp}' ) if not options.allow_failed_tests: - raise Exception('Some tests failed to build') + raise Exception( 'Some tests failed to build' ) # Make sure all the testcases requested were found if testcases != "all": - if len(testcases): - logger.error(f"ERROR: Unknown testcases {str(testcases)}") - logger.error(f"ATS files: {str(ats_files)}") - sys.exit(1) + if len( testcases ): + logger.error( 
f"ERROR: Unknown testcases {str(testcases)}" ) + logger.error( f"ATS files: {str(ats_files)}" ) + sys.exit( 1 ) # Report: if options.action in report_actions: - report(ats.manager) + report( ats.manager ) # clean if options.action == "veryclean": - common_utilities.removeLogDirectories(os.getcwd()) - files = [config.report_html_file, config.report_ini_file] + common_utilities.removeLogDirectories( os.getcwd() ) + files = [ config.report_html_file, config.report_ini_file ] for f in files: - if os.path.exists(f): - os.remove(f) + if os.path.exists( f ): + os.remove( f ) # clean the temporary logfile that is not needed for certain actions. if options.action not in test_actions: if options.logs is not None: - if os.path.exists(options.logs): - shutil.rmtree(options.logs) + if os.path.exists( options.logs ): + shutil.rmtree( options.logs ) # return 0 if all tests passed, 1 otherwise try: if options.failIfTestsFail: - with open(os.path.join(options.logs, "test_results.html"), 'r') as f: - contents = ''.join(f.readlines()).split("DETAILED RESULTS")[1] + with open( os.path.join( options.logs, "test_results.html" ), 'r' ) as f: + contents = ''.join( f.readlines() ).split( "DETAILED RESULTS" )[ 1 ] messages = [ "class=\"red\">FAIL", "class=\"yellow\">SKIPPED", "class=\"reddish\">FAIL", "class=\"yellow\">NOT RUN" ] - result = any([m in contents for m in messages]) + result = any( [ m in contents for m in messages ] ) except IOError as e: - logger.debug(e) + logger.debug( e ) # Other ATS steps not previously included: ats.manager.postprocess() @@ -420,13 +421,13 @@ def main(): ats.manager.finalBanner() # Remove unnecessary log dirs created with clean runs - none_dir = os.path.join(options.workingDir, 'None') - if os.path.exists(none_dir): - shutil.rmtree(none_dir) + none_dir = os.path.join( options.workingDir, 'None' ) + if os.path.exists( none_dir ): + shutil.rmtree( none_dir ) return result if __name__ == "__main__": result = main() - sys.exit(result) + sys.exit( result ) 
diff --git a/geos_ats_package/geos_ats/reporting.py b/geos_ats_package/geos_ats/reporting.py index 1b35a0e..a5b0199 100644 --- a/geos_ats_package/geos_ats/reporting.py +++ b/geos_ats_package/geos_ats/reporting.py @@ -7,52 +7,52 @@ import glob import logging from ats.times import hms -from ats import (PASSED, FAILED, TIMEDOUT, EXPECTED, BATCHED, FILTERED, SKIPPED, CREATED, RUNNING, HALTED, LSFERROR) +from ats import ( PASSED, FAILED, TIMEDOUT, EXPECTED, BATCHED, FILTERED, SKIPPED, CREATED, RUNNING, HALTED, LSFERROR ) # Get the active logger instance -logger = logging.getLogger('geos_ats') +logger = logging.getLogger( 'geos_ats' ) # Status value in priority order -STATUS = (EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED, RUNNING, PASSED, TIMEDOUT, HALTED, LSFERROR, FAILED) +STATUS = ( EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED, RUNNING, PASSED, TIMEDOUT, HALTED, LSFERROR, FAILED ) COLORS = {} -COLORS[EXPECTED.name] = "black" -COLORS[CREATED.name] = "black" -COLORS[BATCHED.name] = "black" -COLORS[FILTERED.name] = "black" -COLORS[SKIPPED.name] = "orange" -COLORS[RUNNING.name] = "blue" -COLORS[PASSED.name] = "green" -COLORS[TIMEDOUT.name] = "red" -COLORS[HALTED.name] = "brown" -COLORS[LSFERROR.name] = "brown" -COLORS[FAILED.name] = "red" - - -def max_status(sa, sb): - Ia = STATUS.index(sa) - Ib = STATUS.index(sb) - return STATUS[max(Ia, Ib)] - - -class ReportBase(object): +COLORS[ EXPECTED.name ] = "black" +COLORS[ CREATED.name ] = "black" +COLORS[ BATCHED.name ] = "black" +COLORS[ FILTERED.name ] = "black" +COLORS[ SKIPPED.name ] = "orange" +COLORS[ RUNNING.name ] = "blue" +COLORS[ PASSED.name ] = "green" +COLORS[ TIMEDOUT.name ] = "red" +COLORS[ HALTED.name ] = "brown" +COLORS[ LSFERROR.name ] = "brown" +COLORS[ FAILED.name ] = "red" + + +def max_status( sa, sb ): + Ia = STATUS.index( sa ) + Ib = STATUS.index( sb ) + return STATUS[ max( Ia, Ib ) ] + + +class ReportBase( object ): """Base class for reporting""" - def __init__(self, test_steps): + def __init__( self, 
test_steps ): self.test_results = {} self.test_groups = {} self.status_lists = {} for t in test_steps: # Parse the test step name - step_name = t.name[t.name.find('(') + 1:t.name.rfind('_')] - test_name = step_name[:step_name.rfind('_')] + step_name = t.name[ t.name.find( '(' ) + 1:t.name.rfind( '_' ) ] + test_name = step_name[ :step_name.rfind( '_' ) ] test_id = t.group.number - group_name = test_name[:test_name.rfind('_')] + group_name = test_name[ :test_name.rfind( '_' ) ] # Save data if test_name not in self.test_results: - self.test_results[test_name] = { + self.test_results[ test_name ] = { 'steps': {}, 'status': EXPECTED, 'id': test_id, @@ -60,7 +60,7 @@ def __init__(self, test_steps): 'current_step': ' ', 'resources': t.np } - self.test_results[test_name]['steps'][t.name] = { + self.test_results[ test_name ][ 'steps' ][ t.name ] = { 'status': t.status, 'log': t.outname, 'output': t.step_outputs, @@ -69,75 +69,77 @@ def __init__(self, test_steps): # Check elapsed time elapsed = 0.0 - if hasattr(t, 'endTime'): + if hasattr( t, 'endTime' ): elapsed = t.endTime - t.startTime - self.test_results[test_name]['steps'][t.name]['elapsed'] = elapsed - self.test_results[test_name]['elapsed'] += elapsed + self.test_results[ test_name ][ 'steps' ][ t.name ][ 'elapsed' ] = elapsed + self.test_results[ test_name ][ 'elapsed' ] += elapsed # Check the status and the latest step - self.test_results[test_name]['status'] = max_status(t.status, self.test_results[test_name]['status']) - if t.status not in (EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED): - self.test_results[test_name]['current_step'] = t.name + self.test_results[ test_name ][ 'status' ] = max_status( t.status, + self.test_results[ test_name ][ 'status' ] ) + if t.status not in ( EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED ): + self.test_results[ test_name ][ 'current_step' ] = t.name if group_name not in self.test_groups: - self.test_groups[group_name] = {'tests': [], 'status': EXPECTED} - 
self.test_groups[group_name]['tests'].append(test_name) - self.test_groups[group_name]['status'] = max_status(t.status, self.test_groups[group_name]['status']) + self.test_groups[ group_name ] = { 'tests': [], 'status': EXPECTED } + self.test_groups[ group_name ][ 'tests' ].append( test_name ) + self.test_groups[ group_name ][ 'status' ] = max_status( t.status, + self.test_groups[ group_name ][ 'status' ] ) # Collect status names for s in STATUS: - self.status_lists[s.name] = [k for k, v in self.test_results.items() if v['status'] == s] + self.status_lists[ s.name ] = [ k for k, v in self.test_results.items() if v[ 'status' ] == s ] self.html_filename = config.report_html_file -class ReportIni(ReportBase): +class ReportIni( ReportBase ): """Minimal reporting class""" - def report(self, fp): + def report( self, fp ): configParser = ConfigParser() - configParser.add_section("Info") - configParser.set("Info", "Time", time.strftime("%a, %d %b %Y %H:%M:%S")) + configParser.add_section( "Info" ) + configParser.set( "Info", "Time", time.strftime( "%a, %d %b %Y %H:%M:%S" ) ) try: platform = socket.gethostname() except: - logger.debug("Could not get host name") + logger.debug( "Could not get host name" ) platform = "unknown" - configParser.set("Info", "Platform", platform) + configParser.set( "Info", "Platform", platform ) extraNotations = "" for line in config.report_notations: - line_split = line.split(":") - if len(line_split) != 2: - line_split = line.split("=") - if len(line_split) != 2: + line_split = line.split( ":" ) + if len( line_split ) != 2: + line_split = line.split( "=" ) + if len( line_split ) != 2: extraNotations += "\"" + line.strip() + "\"" continue - configParser.set("Info", line_split[0].strip(), line_split[1].strip()) + configParser.set( "Info", line_split[ 0 ].strip(), line_split[ 1 ].strip() ) if extraNotations != "": - configParser.set("Info", "Extra Notations", extraNotations) + configParser.set( "Info", "Extra Notations", extraNotations ) - 
configParser.add_section("Results") + configParser.add_section( "Results" ) for k, v in self.status_lists.items(): - configParser.set("Results", k, ";".join(sorted(v))) + configParser.set( "Results", k, ";".join( sorted( v ) ) ) - configParser.write(fp) + configParser.write( fp ) -class ReportHTML(ReportBase): +class ReportHTML( ReportBase ): """HTML Reporting""" - def report(self, refresh=0): - sp = open(self.html_filename, 'w') - self.writeHeader(sp, refresh) - self.writeSummary(sp) - self.writeTable(sp) - self.writeFooter(sp) + def report( self, refresh=0 ): + sp = open( self.html_filename, 'w' ) + self.writeHeader( sp, refresh ) + self.writeSummary( sp ) + self.writeTable( sp ) + self.writeFooter( sp ) sp.close() - def writeHeader(self, sp, refresh): - gentime = time.strftime("%a, %d %b %Y %H:%M:%S") + def writeHeader( self, sp, refresh ): + gentime = time.strftime( "%a, %d %b %Y %H:%M:%S" ) header = """ @@ -208,37 +210,37 @@ def writeHeader(self, sp, refresh): try: platform = socket.gethostname() except: - logger.debug("Could not get host name") + logger.debug( "Could not get host name" ) platform = "unknown" if os.name == "nt": - username = os.getenv("USERNAME") + username = os.getenv( "USERNAME" ) else: - username = os.getenv("USER") + username = os.getenv( "USER" ) header += "

GEOS ATS Report

\n

Configuration

\n" - table = [['Test Results', gentime], ['User', username], ['Platform', platform]] - header += tabulate(table, tablefmt='html') + table = [ [ 'Test Results', gentime ], [ 'User', username ], [ 'Platform', platform ] ] + header += tabulate( table, tablefmt='html' ) header += '\n' - sp.write(header) + sp.write( header ) - def writeSummary(self, sp): + def writeSummary( self, sp ): link_pattern = '{}\n' color_pattern = "

{}

" - header = ['Status', 'Count', 'Tests'] + header = [ 'Status', 'Count', 'Tests' ] table = [] for k, v in self.status_lists.items(): - status_formatted = color_pattern.format(COLORS[k], k) - test_links = [link_pattern.format(t, t) for t in v] - table.append([status_formatted, len(v), ', '.join(test_links)]) + status_formatted = color_pattern.format( COLORS[ k ], k ) + test_links = [ link_pattern.format( t, t ) for t in v ] + table.append( [ status_formatted, len( v ), ', '.join( test_links ) ] ) - sp.write("\n\n

Summary

\n\n") - table_html = tabulate(table, headers=header, tablefmt='unsafehtml') - sp.write(table_html) + sp.write( "\n\n

Summary

\n\n" ) + table_html = tabulate( table, headers=header, tablefmt='unsafehtml' ) + sp.write( table_html ) - def writeTable(self, sp): - header = ("Status", "Name", "TestStep", "Elapsed", "Resources", "Output") + def writeTable( self, sp ): + header = ( "Status", "Name", "TestStep", "Elapsed", "Resources", "Output" ) table = [] table_filt = [] @@ -246,40 +248,43 @@ def writeTable(self, sp): color_pattern = "

{}

" for k, v in self.test_results.items(): - status_str = v['status'].name - status_formatted = color_pattern.format(COLORS[status_str], k, status_str) - step_shortname = v['current_step'] - elapsed_formatted = hms(v['elapsed']) + status_str = v[ 'status' ].name + status_formatted = color_pattern.format( COLORS[ status_str ], k, status_str ) + step_shortname = v[ 'current_step' ] + elapsed_formatted = hms( v[ 'elapsed' ] ) output_files = [] - for s in v['steps'].values(): - if os.path.isfile(s['log']): - output_files.append(file_pattern.format(s['log'], os.path.basename(s['log']))) - if os.path.isfile(s['log'] + '.err'): - output_files.append(file_pattern.format(s['log'] + '.err', os.path.basename(s['log'] + '.err'))) - for pattern in s['output']: - for f in sorted(glob.glob(pattern)): - if (('restart' not in f) or ('.restartcheck' in f)) and os.path.isfile(f): - output_files.append(file_pattern.format(f, os.path.basename(f))) - - row = [status_formatted, k, step_shortname, elapsed_formatted, v['resources'], ', '.join(output_files)] + for s in v[ 'steps' ].values(): + if os.path.isfile( s[ 'log' ] ): + output_files.append( file_pattern.format( s[ 'log' ], os.path.basename( s[ 'log' ] ) ) ) + if os.path.isfile( s[ 'log' ] + '.err' ): + output_files.append( + file_pattern.format( s[ 'log' ] + '.err', os.path.basename( s[ 'log' ] + '.err' ) ) ) + for pattern in s[ 'output' ]: + for f in sorted( glob.glob( pattern ) ): + if ( ( 'restart' not in f ) or ( '.restartcheck' in f ) ) and os.path.isfile( f ): + output_files.append( file_pattern.format( f, os.path.basename( f ) ) ) + + row = [ + status_formatted, k, step_shortname, elapsed_formatted, v[ 'resources' ], ', '.join( output_files ) + ] if status_str == 'FILTERED': - table_filt.append(row) + table_filt.append( row ) else: - table.append(row) + table.append( row ) - if len(table): - sp.write("\n\n

Active Tests

\n\n") - table_html = tabulate(table, headers=header, tablefmt='unsafehtml') - sp.write(table_html) + if len( table ): + sp.write( "\n\n

Active Tests

\n\n" ) + table_html = tabulate( table, headers=header, tablefmt='unsafehtml' ) + sp.write( table_html ) - if len(table_filt): - sp.write("\n\n

Filtered Tests

\n\n") - table_html = tabulate(table_filt, headers=header, tablefmt='unsafehtml') - sp.write(table_html) + if len( table_filt ): + sp.write( "\n\n

Filtered Tests

\n\n" ) + table_html = tabulate( table_filt, headers=header, tablefmt='unsafehtml' ) + sp.write( table_html ) - def writeFooter(self, sp): + def writeFooter( self, sp ): footer = """ """ - sp.write(footer) + sp.write( footer ) diff --git a/geos_ats_package/geos_ats/test_builder.py b/geos_ats_package/geos_ats/test_builder.py index 156a85c..cb1de71 100644 --- a/geos_ats_package/geos_ats/test_builder.py +++ b/geos_ats_package/geos_ats/test_builder.py @@ -12,42 +12,42 @@ from .test_case import TestCase test_build_failures = [] -logger = logging.getLogger('geos_ats') +logger = logging.getLogger( 'geos_ats' ) -@dataclass(frozen=True) +@dataclass( frozen=True ) class RestartcheckParameters: atol: float rtol: float - def as_dict(self): - return asdict(self) + def as_dict( self ): + return asdict( self ) -@dataclass(frozen=True) +@dataclass( frozen=True ) class CurveCheckParameters: filename: str - tolerance: Iterable[float] - curves: List[List[str]] - script_instructions: Iterable[Iterable[str]] = None + tolerance: Iterable[ float ] + curves: List[ List[ str ] ] + script_instructions: Iterable[ Iterable[ str ] ] = None time_units: str = "seconds" - def as_dict(self): - return asdict(self) + def as_dict( self ): + return asdict( self ) -@dataclass(frozen=True) +@dataclass( frozen=True ) class TestDeck: name: str description: str - partitions: Iterable[Tuple[int, int, int]] + partitions: Iterable[ Tuple[ int, int, int ] ] restart_step: int check_step: int restartcheck_params: RestartcheckParameters = None curvecheck_params: CurveCheckParameters = None -def collect_block_names(fname): +def collect_block_names( fname ): """ Collect block names in an xml file @@ -58,35 +58,35 @@ def collect_block_names(fname): dict: Pairs of top-level block names and lists of child block names """ pwd = os.getcwd() - actual_dir, actual_fname = os.path.split(os.path.realpath(fname)) - os.chdir(actual_dir) + actual_dir, actual_fname = os.path.split( os.path.realpath( fname ) ) + os.chdir( 
actual_dir ) # Collect the block names in this file results = {} - parser = etree.XMLParser(remove_comments=True) - tree = etree.parse(actual_fname, parser=parser) + parser = etree.XMLParser( remove_comments=True ) + tree = etree.parse( actual_fname, parser=parser ) root = tree.getroot() for child in root.getchildren(): - results[child.tag] = [grandchild.tag for grandchild in child.getchildren()] + results[ child.tag ] = [ grandchild.tag for grandchild in child.getchildren() ] # Collect block names in included files - for included_root in root.findall('Included'): - for included_file in included_root.findall('File'): - f = included_file.get('name') - child_results = collect_block_names(f) + for included_root in root.findall( 'Included' ): + for included_file in included_root.findall( 'File' ): + f = included_file.get( 'name' ) + child_results = collect_block_names( f ) for k, v in child_results.items(): if k in results: - results[k].extend(v) + results[ k ].extend( v ) else: - results[k] = v - os.chdir(pwd) + results[ k ] = v + os.chdir( pwd ) return results -def generate_geos_tests(decks: Iterable[TestDeck], test_type='smoke'): +def generate_geos_tests( decks: Iterable[ TestDeck ], test_type='smoke' ): """ """ - for ii, deck in enumerate(decks): + for ii, deck in enumerate( decks ): restartcheck_params = None curvecheck_params = None @@ -101,62 +101,62 @@ def generate_geos_tests(decks: Iterable[TestDeck], test_type='smoke'): nx, ny, nz = partition N = nx * ny * nz - testcase_name = "{}_{:02d}".format(deck.name, N) - base_name = "0to{:d}".format(deck.check_step) + testcase_name = "{}_{:02d}".format( deck.name, N ) + base_name = "0to{:d}".format( deck.check_step ) # Search for the target xml file xml_file = '' - for suffix in ['', f'_{test_type}']: - if os.path.isfile("{}{}.xml".format(deck.name, suffix)): - xml_file = "{}{}.xml".format(deck.name, suffix) + for suffix in [ '', f'_{test_type}' ]: + if os.path.isfile( "{}{}.xml".format( deck.name, suffix ) ): + 
xml_file = "{}{}.xml".format( deck.name, suffix ) if not xml_file: - logger.error(f'Could not find a matching xml file for the test: {deck.name}') - test_build_failures.append(deck.name) + logger.error( f'Could not find a matching xml file for the test: {deck.name}' ) + test_build_failures.append( deck.name ) continue - xml_blocks = collect_block_names(xml_file) + xml_blocks = collect_block_names( xml_file ) checks = [] if curvecheck_params: - checks.append('curve') + checks.append( 'curve' ) steps = [ - geos(deck=xml_file, - name=base_name, - np=N, - ngpu=N, - x_partitions=nx, - y_partitions=ny, - z_partitions=nz, - restartcheck_params=restartcheck_params, - curvecheck_params=curvecheck_params) + geos( deck=xml_file, + name=base_name, + np=N, + ngpu=N, + x_partitions=nx, + y_partitions=ny, + z_partitions=nz, + restartcheck_params=restartcheck_params, + curvecheck_params=curvecheck_params ) ] if deck.restart_step > 0: - checks.append('restart') + checks.append( 'restart' ) steps.append( - geos(deck=xml_file, - name="{:d}to{:d}".format(deck.restart_step, deck.check_step), - np=N, - ngpu=N, - x_partitions=nx, - y_partitions=ny, - z_partitions=nz, - restart_file=os.path.join(testcase_name, - "{}_restart_{:09d}".format(base_name, deck.restart_step)), - baseline_pattern=f"{base_name}_restart_[0-9]+\.root", - allow_rebaseline=False, - restartcheck_params=restartcheck_params)) - - AtsTest.stick(level=ii) - AtsTest.stick(checks=','.join(checks)) - AtsTest.stick(solvers=','.join(xml_blocks.get('Solvers', []))) - AtsTest.stick(outputs=','.join(xml_blocks.get('Outputs', []))) - AtsTest.stick(constitutive_models=','.join(xml_blocks.get('Constitutive', []))) - TestCase(name=testcase_name, - desc=deck.description, - label="auto", - owner="GEOS team", - independent=True, - steps=steps) + geos( deck=xml_file, + name="{:d}to{:d}".format( deck.restart_step, deck.check_step ), + np=N, + ngpu=N, + x_partitions=nx, + y_partitions=ny, + z_partitions=nz, + restart_file=os.path.join( 
testcase_name, + "{}_restart_{:09d}".format( base_name, deck.restart_step ) ), + baseline_pattern=f"{base_name}_restart_[0-9]+\.root", + allow_rebaseline=False, + restartcheck_params=restartcheck_params ) ) + + AtsTest.stick( level=ii ) + AtsTest.stick( checks=','.join( checks ) ) + AtsTest.stick( solvers=','.join( xml_blocks.get( 'Solvers', [] ) ) ) + AtsTest.stick( outputs=','.join( xml_blocks.get( 'Outputs', [] ) ) ) + AtsTest.stick( constitutive_models=','.join( xml_blocks.get( 'Constitutive', [] ) ) ) + TestCase( name=testcase_name, + desc=deck.description, + label="auto", + owner="GEOS team", + independent=True, + steps=steps ) diff --git a/geos_ats_package/geos_ats/test_case.py b/geos_ats_package/geos_ats/test_case.py index 2a43658..1bcee3e 100644 --- a/geos_ats_package/geos_ats/test_case.py +++ b/geos_ats_package/geos_ats/test_case.py @@ -6,106 +6,106 @@ import inspect from configparser import ConfigParser from ats import atsut -from ats import (PASSED, FAILED, FILTERED, SKIPPED) +from ats import ( PASSED, FAILED, FILTERED, SKIPPED ) from geos_ats.common_utilities import Error, Log, removeLogDirectories from geos_ats.configuration_record import config, globalTestTimings test = ats.manager.test testif = ats.manager.testif -logger = logging.getLogger('geos_ats') +logger = logging.getLogger( 'geos_ats' ) -class Batch(object): +class Batch( object ): """A class to represent batch options""" - def __init__(self, enabled=True, duration="1h", ppn=0, altname=None): + def __init__( self, enabled=True, duration="1h", ppn=0, altname=None ): - if enabled not in (True, False): - Error("enabled must be a boolean") + if enabled not in ( True, False ): + Error( "enabled must be a boolean" ) self.enabled = enabled self.duration = duration try: - dur = ats.Duration(duration) + dur = ats.Duration( duration ) self.durationSeconds = dur.value except ats.AtsError as e: - logger.error(e) - Error("bad time specification: %s" % duration) + logger.error( e ) + Error( "bad time 
specification: %s" % duration ) self.ppn = ppn # processor per node self.altname = altname # alternate name to use when launcing the batch job -class TestCase(object): +class TestCase( object ): """Encapsulates one test case, which may include many steps""" - def __init__(self, name, desc, label=None, labels=None, steps=[], **kw): + def __init__( self, name, desc, label=None, labels=None, steps=[], **kw ): try: - self.initialize(name, desc, label, labels, steps, **kw) + self.initialize( name, desc, label, labels, steps, **kw ) except Exception as e: # make sure error messages get logged, then get out of here. - logging.error(e) - Log(str(e)) - raise Exception(e) + logging.error( e ) + Log( str( e ) ) + raise Exception( e ) - def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch(enabled=False), **kw): + def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch( enabled=False ), **kw ): self.name = name self.desc = desc self.batch = batch # Identify the location of the ats test file - ats_root_dir = ats.tests.AtsTest.getOptions().get("atsRootDir") + ats_root_dir = ats.tests.AtsTest.getOptions().get( "atsRootDir" ) self.dirname = '' for s in inspect.stack(): if ats_root_dir in s.filename: - self.dirname = os.path.dirname(s.filename) + self.dirname = os.path.dirname( s.filename ) break if not self.dirname: - logger.warning('Could not find the proper test location... defaulting to current dir') + logger.warning( 'Could not find the proper test location... 
defaulting to current dir' ) self.dirname = os.getcwd() # Setup paths - log_dir = ats.tests.AtsTest.getOptions().get("logDir") - working_relpath = os.path.relpath(self.dirname, ats_root_dir) - working_root = ats.tests.AtsTest.getOptions().get("workingDir") - working_dir = os.path.abspath(os.path.join(working_root, working_relpath, self.name)) + log_dir = ats.tests.AtsTest.getOptions().get( "logDir" ) + working_relpath = os.path.relpath( self.dirname, ats_root_dir ) + working_root = ats.tests.AtsTest.getOptions().get( "workingDir" ) + working_dir = os.path.abspath( os.path.join( working_root, working_relpath, self.name ) ) baseline_relpath = working_relpath - baseline_root = ats.tests.AtsTest.getOptions().get("baselineDir") - baseline_directory = os.path.abspath(os.path.join(baseline_root, baseline_relpath, self.name)) + baseline_root = ats.tests.AtsTest.getOptions().get( "baselineDir" ) + baseline_directory = os.path.abspath( os.path.join( baseline_root, baseline_relpath, self.name ) ) self.path = working_relpath try: - os.makedirs(working_dir, exist_ok=True) + os.makedirs( working_dir, exist_ok=True ) except OSError as e: - logger.debug(e) + logger.debug( e ) raise Exception() # Setup other parameters self.dictionary = {} - self.dictionary.update(kw) - self.nodoc = self.dictionary.get("nodoc", False) + self.dictionary.update( kw ) + self.nodoc = self.dictionary.get( "nodoc", False ) self.last_status = None - self.dictionary["name"] = self.name - self.dictionary["test_directory"] = self.dirname - self.dictionary["output_directory"] = working_dir - self.dictionary["baseline_directory"] = baseline_directory - self.dictionary["log_directory"] = log_dir - self.dictionary["testcase_name"] = self.name + self.dictionary[ "name" ] = self.name + self.dictionary[ "test_directory" ] = self.dirname + self.dictionary[ "output_directory" ] = working_dir + self.dictionary[ "baseline_directory" ] = baseline_directory + self.dictionary[ "log_directory" ] = log_dir + 
self.dictionary[ "testcase_name" ] = self.name # Check for previous log information - log_file = os.path.join(log_dir, 'test_results.ini') - if os.path.isfile(log_file): + log_file = os.path.join( log_dir, 'test_results.ini' ) + if os.path.isfile( log_file ): previous_config = ConfigParser() - previous_config.read(log_file) - for k, v in previous_config['Results'].items(): - if self.name in v.split(';'): - self.last_status = atsut.StatusCode(k.upper()) + previous_config.read( log_file ) + for k, v in previous_config[ 'Results' ].items(): + if self.name in v.split( ';' ): + self.last_status = atsut.StatusCode( k.upper() ) # check for independent if config.override_np > 0: @@ -114,12 +114,12 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( # number of processors. self.independent = False else: - self.independent = self.dictionary.get("independent", False) - if self.independent not in (True, False): - Error("independent must be either True or False: %s" % str(self.independent)) + self.independent = self.dictionary.get( "independent", False ) + if self.independent not in ( True, False ): + Error( "independent must be either True or False: %s" % str( self.independent ) ) # check for depends - self.depends = self.dictionary.get("depends", None) + self.depends = self.dictionary.get( "depends", None ) if self.depends == self.name: # This check avoid testcases depending on themselves. self.depends = None @@ -128,16 +128,16 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( # 1. update the steps with data from the dictionary # 2. 
substeps are inserted into the list of steps (the steps are flattened) for step in steps: - step.update(self.dictionary) + step.update( self.dictionary ) self.steps = [] for step in steps: - step.insertStep(self.steps) + step.insertStep( self.steps ) # Check for explicit skip flag - action = ats.tests.AtsTest.getOptions().get("action") - if action in ("run", "rerun", "continue"): - if self.dictionary.get("skip", None): + action = ats.tests.AtsTest.getOptions().get( "action" ) + if action in ( "run", "rerun", "continue" ): + if self.dictionary.get( "skip", None ): self.status = SKIPPED return @@ -145,7 +145,7 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( npMax = self.findMaxNumberOfProcessors() if config.filter_maxprocessors != -1: if npMax > config.filter_maxprocessors: - Log("# FILTER test=%s : max processors(%d > %d)" % (self.name, npMax, config.filter_maxprocessors)) + Log( "# FILTER test=%s : max processors(%d > %d)" % ( self.name, npMax, config.filter_maxprocessors ) ) self.status = FILTERED return @@ -153,67 +153,68 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( ngpuMax = self.findMaxNumberOfGPUs() # filter based on not enough resources - if action in ("run", "rerun", "continue"): + if action in ( "run", "rerun", "continue" ): tests = [ - not ats.tests.AtsTest.getOptions().get("testmode"), not self.batch.enabled, - hasattr(ats.manager.machine, "getNumberOfProcessors") + not ats.tests.AtsTest.getOptions().get( "testmode" ), not self.batch.enabled, + hasattr( ats.manager.machine, "getNumberOfProcessors" ) ] - if all(tests): + if all( tests ): - totalNumberOfProcessors = getattr(ats.manager.machine, "getNumberOfProcessors")() + totalNumberOfProcessors = getattr( ats.manager.machine, "getNumberOfProcessors" )() if npMax > totalNumberOfProcessors: - Log("# SKIP test=%s : not enough processors to run (%d > %d)" % - (self.name, npMax, totalNumberOfProcessors)) + Log( "# SKIP test=%s : not 
enough processors to run (%d > %d)" % + ( self.name, npMax, totalNumberOfProcessors ) ) self.status = SKIPPED return # If the machine doesn't specify a number of GPUs then it has none. - totalNumberOfGPUs = getattr(ats.manager.machine, "getNumberOfGPUS", lambda: 1e90)() + totalNumberOfGPUs = getattr( ats.manager.machine, "getNumberOfGPUS", lambda: 1e90 )() if ngpuMax > totalNumberOfGPUs: - Log("# SKIP test=%s : not enough gpus to run (%d > %d)" % (self.name, ngpuMax, totalNumberOfGPUs)) + Log( "# SKIP test=%s : not enough gpus to run (%d > %d)" % + ( self.name, ngpuMax, totalNumberOfGPUs ) ) self.status = SKIPPED return # filtering test steps based on action - if action in ("run", "rerun", "continue"): - checkoption = ats.tests.AtsTest.getOptions().get("checkoption") + if action in ( "run", "rerun", "continue" ): + checkoption = ats.tests.AtsTest.getOptions().get( "checkoption" ) if checkoption == "none": - self.steps = [step for step in self.steps if not step.isCheck()] + self.steps = [ step for step in self.steps if not step.isCheck() ] elif action == "check": - self.steps = [step for step in self.steps if step.isCheck()] + self.steps = [ step for step in self.steps if step.isCheck() ] # move all the delayed steps to the end reorderedSteps = [] for step in self.steps: if not step.isDelayed(): - reorderedSteps.append(step) + reorderedSteps.append( step ) for step in self.steps: if step.isDelayed(): - reorderedSteps.append(step) + reorderedSteps.append( step ) self.steps = reorderedSteps # Perform the action: - if action in ("run", "continue"): - Log("# run test=%s" % (self.name)) + if action in ( "run", "continue" ): + Log( "# run test=%s" % ( self.name ) ) self.testCreate() elif action == "rerun": - Log("# rerun test=%s" % (self.name)) + Log( "# rerun test=%s" % ( self.name ) ) self.testCreate() elif action == "check": - Log("# check test=%s" % (self.name)) + Log( "# check test=%s" % ( self.name ) ) self.testCreate() elif action == "commands": self.testCommands() 
elif action == "clean": - Log("# clean test=%s" % (self.name)) + Log( "# clean test=%s" % ( self.name ) ) self.testClean() elif action == "veryclean": - Log("# veryclean test=%s" % (self.name)) + Log( "# veryclean test=%s" % ( self.name ) ) self.testVeryClean() elif action == "rebaseline": @@ -226,165 +227,166 @@ def initialize(self, name, desc, label=None, labels=None, steps=[], batch=Batch( self.testList() else: - Error("Unknown action?? %s" % action) + Error( "Unknown action?? %s" % action ) - def logNames(self): - return sorted(glob.glob(os.path.join(self.dictionary["log_directory"], f'*{self.name}_*'))) + def logNames( self ): + return sorted( glob.glob( os.path.join( self.dictionary[ "log_directory" ], f'*{self.name}_*' ) ) ) - def resultPaths(self, step=None): + def resultPaths( self, step=None ): """Return the paths to output files for the testcase. Used in reporting""" paths = [] if step: for x in step.resultPaths(): - fullpath = os.path.join(self.path, x) - if os.path.exists(fullpath): - paths.append(fullpath) + fullpath = os.path.join( self.path, x ) + if os.path.exists( fullpath ): + paths.append( fullpath ) return paths - def cleanLogs(self): + def cleanLogs( self ): for f in self.logNames(): - os.remove(f) + os.remove( f ) - def testClean(self): + def testClean( self ): self.cleanLogs() for step in self.steps: step.clean() - def testVeryClean(self): + def testVeryClean( self ): - def _remove(path): - delpaths = glob.glob(path) + def _remove( path ): + delpaths = glob.glob( path ) for p in delpaths: - if os.path.exists(p): + if os.path.exists( p ): try: - if os.path.isdir(p): - shutil.rmtree(p) + if os.path.isdir( p ): + shutil.rmtree( p ) else: - os.remove(p) + os.remove( p ) except OSError: pass # so that two simultaneous clean operations don't fail # clean self.testClean() # remove log directories - removeLogDirectories(os.getcwd()) + removeLogDirectories( os.getcwd() ) # remove extra files - if len(self.steps) > 0: - 
_remove(config.report_html_file) - _remove(self.path) - _remove("*.core") - _remove("core") - _remove("core.*") - _remove("vgcore.*") - _remove("*.btr") - _remove("TestLogs*") - _remove("*.ini") - - def findMaxNumberOfProcessors(self): + if len( self.steps ) > 0: + _remove( config.report_html_file ) + _remove( self.path ) + _remove( "*.core" ) + _remove( "core" ) + _remove( "core.*" ) + _remove( "vgcore.*" ) + _remove( "*.btr" ) + _remove( "TestLogs*" ) + _remove( "*.ini" ) + + def findMaxNumberOfProcessors( self ): npMax = 1 for step in self.steps: - np = getattr(step.p, "np", 1) - npMax = max(np, npMax) + np = getattr( step.p, "np", 1 ) + npMax = max( np, npMax ) return npMax - def findMaxNumberOfGPUs(self): + def findMaxNumberOfGPUs( self ): gpuMax = 0 for step in self.steps: - ngpu = getattr(step.p, "ngpu", 0) * getattr(step.p, "np", 1) - gpuMax = max(ngpu, gpuMax) + ngpu = getattr( step.p, "ngpu", 0 ) * getattr( step.p, "np", 1 ) + gpuMax = max( ngpu, gpuMax ) return gpuMax - def testCreate(self): + def testCreate( self ): # Remove old logs self.cleanLogs() maxnp = 1 - for stepnum, step in enumerate(self.steps): - np = getattr(step.p, "np", 1) - maxnp = max(np, maxnp) + for stepnum, step in enumerate( self.steps ): + np = getattr( step.p, "np", 1 ) + maxnp = max( np, maxnp ) if config.priority == "processors": priority = maxnp elif config.priority == "timing": - priority = max(globalTestTimings.get(self.name, 1) * maxnp, 1) + priority = max( globalTestTimings.get( self.name, 1 ) * maxnp, 1 ) else: priority = 1 # Setup a new test group atsTest = None - ats.tests.AtsTest.newGroup(priority=priority) - for stepnum, step in enumerate(self.steps): - np = getattr(step.p, "np", 1) - ngpu = getattr(step.p, "ngpu", 0) + ats.tests.AtsTest.newGroup( priority=priority ) + for stepnum, step in enumerate( self.steps ): + np = getattr( step.p, "np", 1 ) + ngpu = getattr( step.p, "ngpu", 0 ) executable = step.executable() args = step.makeArgs() - label = "%s_%d_%s" % 
(self.name, stepnum + 1, step.label()) + label = "%s_%d_%s" % ( self.name, stepnum + 1, step.label() ) # call either 'test' or 'testif' if atsTest is None: - func = lambda *a, **k: test(*a, **k) + func = lambda *a, **k: test( *a, **k ) else: - func = lambda *a, **k: testif(atsTest, *a, **k) + func = lambda *a, **k: testif( atsTest, *a, **k ) # Set the time limit kw = {} if self.batch.enabled: - kw["timelimit"] = self.batch.duration - if (step.timelimit() and not config.override_timelimit): - kw["timelimit"] = step.timelimit() + kw[ "timelimit" ] = self.batch.duration + if ( step.timelimit() and not config.override_timelimit ): + kw[ "timelimit" ] = step.timelimit() else: - kw["timelimit"] = config.default_timelimit - - atsTest = func(executable=executable, - clas=args, - np=np, - ngpu=ngpu, - label=label, - serial=(not step.useMPI() and not config.script_launch), - independent=self.independent, - batch=self.batch.enabled, - **kw) + kw[ "timelimit" ] = config.default_timelimit + + atsTest = func( executable=executable, + clas=args, + np=np, + ngpu=ngpu, + label=label, + serial=( not step.useMPI() and not config.script_launch ), + independent=self.independent, + batch=self.batch.enabled, + **kw ) atsTest.step_outputs = step.resultPaths() # End the group ats.tests.AtsTest.endGroup() - def commandLine(self, step): + def commandLine( self, step ): args = [] executable = step.executable() commandArgs = step.makeArgs() - assert isinstance(commandArgs, list) + assert isinstance( commandArgs, list ) for a in commandArgs: if " " in a: - args.append('"%s"' % a) + args.append( '"%s"' % a ) else: - args.append(a) + args.append( a ) - argsstr = " ".join(args) + argsstr = " ".join( args ) return executable + " " + argsstr - def testCommands(self): - Log("\n# commands test=%s" % (self.name)) + def testCommands( self ): + Log( "\n# commands test=%s" % ( self.name ) ) for step in self.steps: - np = getattr(step.p, "np", 1) + np = getattr( step.p, "np", 1 ) usempi = step.useMPI() - 
stdout = getattr(step.p, "stdout", None) - commandline = self.commandLine(step).replace('%%', '%') + stdout = getattr( step.p, "stdout", None ) + commandline = self.commandLine( step ).replace( '%%', '%' ) if stdout: - Log("np=%d %s > %s" % (np, commandline, stdout)) + Log( "np=%d %s > %s" % ( np, commandline, stdout ) ) else: - Log("np=%d %s" % (np, commandline)) + Log( "np=%d %s" % ( np, commandline ) ) - def testRebaseline(self): + def testRebaseline( self ): rebaseline = True if config.rebaseline_ask: while 1: if config.rebaseline_undo: - logger.info(f"Are you sure you want to undo the rebaseline for TestCase '{self.name}'?", flush=True) + logger.info( f"Are you sure you want to undo the rebaseline for TestCase '{self.name}'?", + flush=True ) else: - logger.info(f"Are you sure you want to rebaseline TestCase '{self.name}'?", flush=True) + logger.info( f"Are you sure you want to rebaseline TestCase '{self.name}'?", flush=True ) - x = input('[y/n] ') + x = input( '[y/n] ' ) x = x.strip() if x == "y": break @@ -392,21 +394,21 @@ def testRebaseline(self): rebaseline = False break else: - Log("\n# rebaseline test=%s" % (self.name)) + Log( "\n# rebaseline test=%s" % ( self.name ) ) if rebaseline: for step in self.steps: step.rebaseline() - def testRebaselineFailed(self): + def testRebaselineFailed( self ): config.rebaseline_ask = False if self.last_status == FAILED: self.testRebaseline() - def testList(self): - Log("# test=%s : labels=%s" % (self.name.ljust(32), " ".join(self.labels))) + def testList( self ): + Log( "# test=%s : labels=%s" % ( self.name.ljust( 32 ), " ".join( self.labels ) ) ) # Make available to the tests -ats.manager.define(TestCase=TestCase) -ats.manager.define(Batch=Batch) +ats.manager.define( TestCase=TestCase ) +ats.manager.define( Batch=Batch ) diff --git a/geos_ats_package/geos_ats/test_steps.py b/geos_ats_package/geos_ats/test_steps.py index 3f471de..ed0409a 100644 --- a/geos_ats_package/geos_ats/test_steps.py +++ 
b/geos_ats_package/geos_ats/test_steps.py @@ -11,10 +11,10 @@ from geos_ats.common_utilities import Error, Log from geos_ats.configuration_record import config -logger = logging.getLogger('geos_ats') +logger = logging.getLogger( 'geos_ats' ) -def getGeosProblemName(deck, name): +def getGeosProblemName( deck, name ): """ Given an input deck and a name return the prefix Geos will attatch to it's output files. @@ -22,15 +22,15 @@ def getGeosProblemName(deck, name): NAME [in]: The name given to Geos on the command line. """ if name is None: - if deck.endswith(".xml"): - return os.path.basename(deck)[:-4] + if deck.endswith( ".xml" ): + return os.path.basename( deck )[ :-4 ] else: - return os.path.basename(deck) + return os.path.basename( deck ) else: return name -def findMaxMatchingFile(file_path): +def findMaxMatchingFile( file_path ): """ Given a path FILE_PATH where the base name of FILE_PATH is treated as a regular expression find and return the path of the greatest matching file/folder or None if no match is found. @@ -43,31 +43,31 @@ def findMaxMatchingFile(file_path): "test/plot_*.hdf5" will return the file with the greatest name in the ./test directory that begins with "plot_" and ends with ".hdf5". """ - file_directory, pattern = os.path.split(file_path) + file_directory, pattern = os.path.split( file_path ) if file_directory == "": file_directory = "." 
- if not os.path.isdir(file_directory): + if not os.path.isdir( file_directory ): return None max_match = '' - pattern = re.compile(pattern) - for file in os.listdir(file_directory): - if pattern.match(file) is not None: - max_match = max(file, max_match) + pattern = re.compile( pattern ) + for file in os.listdir( file_directory ): + if pattern.match( file ) is not None: + max_match = max( file, max_match ) if not max_match: return None - return os.path.join(file_directory, max_match) + return os.path.join( file_directory, max_match ) -class TestParam(object): +class TestParam( object ): """ A class that describes a parameter of a test step. """ - def __init__(self, name, doc, default=None): + def __init__( self, name, doc, default=None ): self.name = name self.doc = doc self.default = default @@ -78,7 +78,7 @@ def __init__(self, name, doc, default=None): ################################################################################ -class TestStepBase(object): +class TestStepBase( object ): """ The base clase for a test step. """ @@ -87,67 +87,67 @@ class TestStepBase(object): TestParam( "clean", "additional files to remove during the clean action." " clean may be a string or a list of strings. The strings may contain" - " wildcard characters."), + " wildcard characters." ), TestParam( "timelimit", "maximum time the step is allowed to run before it is considerend a TIMEOUT." 
- " Specified as a string such as: 1h30m, 60m, etc.", "None"), - TestParam("stdout", "If set, the stdout will be placed in the named file, in the TestCase directory", None), - TestParam("stderr", "If set, the stderr will be placed in the named file, in the TestCase directory", None), - TestParam("expectedResult", "'PASS' or 'FAIL'", "'PASS'"), - TestParam("delayed", "Whether execution of the step will be delayed", "False"), - TestParam("minor", "Whether failure of this step is minor issue", "False"), + " Specified as a string such as: 1h30m, 60m, etc.", "None" ), + TestParam( "stdout", "If set, the stdout will be placed in the named file, in the TestCase directory", None ), + TestParam( "stderr", "If set, the stderr will be placed in the named file, in the TestCase directory", None ), + TestParam( "expectedResult", "'PASS' or 'FAIL'", "'PASS'" ), + TestParam( "delayed", "Whether execution of the step will be delayed", "False" ), + TestParam( "minor", "Whether failure of this step is minor issue", "False" ), ) commonParams = { "name": - TestParam("name", "Used to give other params default values.", "The name of the TestCase"), + TestParam( "name", "Used to give other params default values.", "The name of the TestCase" ), "deck": - TestParam("deck", "Name of the input file. Setting deck to False means no deck is used.", ".in"), + TestParam( "deck", "Name of the input file. Setting deck to False means no deck is used.", ".in" ), "np": - TestParam("np", "The number of processors to run on.", 1), + TestParam( "np", "The number of processors to run on.", 1 ), "ngpu": - TestParam("ngpu", "The number of gpus to run on when available.", 0), + TestParam( "ngpu", "The number of gpus to run on when available.", 0 ), "check": TestParam( "check", "True or False. 
determines whether the default checksteps will " - "be automatically be added after this step.", "True"), + "be automatically be added after this step.", "True" ), "test_directory": - TestParam("test_directory", "subdirectory holding the test definitions", ""), + TestParam( "test_directory", "subdirectory holding the test definitions", "" ), "baseline_directory": - TestParam("baseline_directory", "subdirectory of config.testbaseline_directory where the test " - "baselines are located.", ""), + TestParam( "baseline_directory", "subdirectory of config.testbaseline_directory where the test " + "baselines are located.", "" ), "output_directory": - TestParam("output_directory", "subdirectory where the test log, params, rin, and " - "timehistory files are located.", ""), + TestParam( "output_directory", "subdirectory where the test log, params, rin, and " + "timehistory files are located.", "" ), "rebaseline": TestParam( "rebaseline", "additional files to rebaseline during the rebaseline action." - " rebaseline may be a string or a list of strings."), + " rebaseline may be a string or a list of strings." ), "timehistfile": - TestParam("timehistfile", "name of the file containing all the" - " timehist curves.", "testmode..ul"), + TestParam( "timehistfile", "name of the file containing all the" + " timehist curves.", "testmode..ul" ), "basetimehistfile": - TestParam("basetimehistfile", "location to the baseline timehistfile", - "//"), + TestParam( "basetimehistfile", "location to the baseline timehistfile", + "//" ), "allow_rebaseline": TestParam( "allow_rebaseline", "True if the second file should be re-baselined during a rebaseline action." 
- " False if the second file should not be rebaselined.", "True"), + " False if the second file should not be rebaselined.", "True" ), "testcase_name": - TestParam("testcase_name", "The name of the testcase"), + TestParam( "testcase_name", "The name of the testcase" ), "testcase_out": - TestParam("testcase_out", "The file where stdout for the testcase is accumulated"), + TestParam( "testcase_out", "The file where stdout for the testcase is accumulated" ), } # namespace to place the params. - class Params(object): + class Params( object ): pass - def __init__(self): + def __init__( self ): self.p = TestStepBase.Params() self.extraSteps = [] - def setParams(self, dictionary, paramlist): + def setParams( self, dictionary, paramlist ): """ Given a list of parameters PARAMLIST and a DICTIONARY set the parameters in PARAMLIST that are also in DICTIONARY but do not yet have a value. @@ -157,38 +157,38 @@ def setParams(self, dictionary, paramlist): """ for p in paramlist: pname = p.name - if getattr(self.p, pname, None) is None: - setattr(self.p, pname, dictionary.get(pname, None)) + if getattr( self.p, pname, None ) is None: + setattr( self.p, pname, dictionary.get( pname, None ) ) - def requireParam(self, param): + def requireParam( self, param ): """ Require that the given parameter is defined and not None. PARAM [in]: The name of the parameter to check. """ - if not hasattr(self.p, param): - Error("%s must be given" % param) - if getattr(self.p, param) is None: - Error("%s must not be None" % param) + if not hasattr( self.p, param ): + Error( "%s must be given" % param ) + if getattr( self.p, param ) is None: + Error( "%s must not be None" % param ) - def insertStep(self, steps): + def insertStep( self, steps ): """ Insert into the list of steps STEPS. STEPS [in/out]: The list of steps to insert into. """ - steps.append(self) + steps.append( self ) - def makeArgs(self): + def makeArgs( self ): """ Return the command line arguments for this step. 
""" - raise Error("Must implement this") + raise Error( "Must implement this" ) - def makeArgsForStatusKey(self): + def makeArgsForStatusKey( self ): return self.makeArgs() - def setStdout(self, dictionary): + def setStdout( self, dictionary ): """ Generate a unique stdout file using DICTIONARY. @@ -199,26 +199,26 @@ def setStdout(self, dictionary): self.p.stdout = stepname + "." + self.label() + ".out" if self.p.stdout in dictionary: - Log("Non-unique name for stdout file: %s" % self.p.stdout) + Log( "Non-unique name for stdout file: %s" % self.p.stdout ) else: - dictionary[self.p.stdout] = 1 + dictionary[ self.p.stdout ] = 1 - def update(self, dictionary): + def update( self, dictionary ): """ Update parameters using DICTIONARY. All parameters which already have values are not updated. Called by the owning TestCase to pass along it's arguments. DICTIONARY [in]: The dictionary used to update the parameters. """ - raise Error("Must implement this") + raise Error( "Must implement this" ) - def clean(self): + def clean( self ): """ Remove files generated by this test step. """ - self._clean([]) + self._clean( [] ) - def _clean(self, paths, noclean=[]): + def _clean( self, paths, noclean=[] ): """ Delete files/folders in PATHS and self.p.clean as well as stdout and stderr but not in NOCLEAN. Paths to delete can have wildcard characters '*'. @@ -226,21 +226,21 @@ def _clean(self, paths, noclean=[]): PATHS [in]: Paths to remove, can have wildcard characters. NOCLEAN [in]: Paths to ignore, can not have wildcard characters. 
""" - self._remove(paths, noclean) + self._remove( paths, noclean ) - if hasattr(self.p, "clean"): + if hasattr( self.p, "clean" ): if self.p.clean is not None: - self._remove(self.p.clean, noclean) - if hasattr(self.p, "stdout"): + self._remove( self.p.clean, noclean ) + if hasattr( self.p, "stdout" ): if self.p.stdout is not None: - self._remove(self.p.stdout, noclean) - self._remove("%s.*" % self.p.stdout, noclean) - if hasattr(self.p, "stderr"): + self._remove( self.p.stdout, noclean ) + self._remove( "%s.*" % self.p.stdout, noclean ) + if hasattr( self.p, "stderr" ): if self.p.stderr is not None: - self._remove(self.p.stderr, noclean) - self._remove("%s.*" % self.p.stderr, noclean) + self._remove( self.p.stderr, noclean ) + self._remove( "%s.*" % self.p.stderr, noclean ) - def _remove(self, paths, noclean): + def _remove( self, paths, noclean ): """ Delete files/folders in PATHS but not in NOCLEAN. Paths to delete can have wildcard characters '*'. @@ -248,100 +248,100 @@ def _remove(self, paths, noclean): PATHS [in]: Paths to remove, can have wildcard characters. NOCLEAN [in]: Paths to ignore, can not have wildcard characters. 
""" - if isinstance(paths, str): - paths = [paths] + if isinstance( paths, str ): + paths = [ paths ] for path in paths: if self.getTestMode(): - Log("clean: %s" % path) + Log( "clean: %s" % path ) else: - delpaths = glob.glob(path) + delpaths = glob.glob( path ) for p in delpaths: if p in noclean: continue try: - if os.path.isdir(p): - shutil.rmtree(p) + if os.path.isdir( p ): + shutil.rmtree( p ) else: - os.remove(p) + os.remove( p ) except OSError as e: - logger.debug(e) # so that two simultaneous clean operations don't fail + logger.debug( e ) # so that two simultaneous clean operations don't fail - def getCheckOption(self): - return ats.tests.AtsTest.getOptions().get("checkoption") + def getCheckOption( self ): + return ats.tests.AtsTest.getOptions().get( "checkoption" ) - def getTestMode(self): - return ats.tests.AtsTest.getOptions().get("testmode") + def getTestMode( self ): + return ats.tests.AtsTest.getOptions().get( "testmode" ) - def isCheck(self): + def isCheck( self ): """ Return True iff this is a check step. """ return False - def isDelayed(self): + def isDelayed( self ): """ Return True iff this step and all substeps should be moved to the end of the test case. """ return self.p.delayed - def isMinor(self): + def isMinor( self ): """ Return True iff failure of this step is a minor issue. """ return self.p.minor - def saveOut(self): + def saveOut( self ): return self.p.stdout - def saveErr(self): + def saveErr( self ): return self.p.stderr - def useMPI(self): + def useMPI( self ): """ Return True iff this step uses MPI. """ return False - def resultPaths(self): + def resultPaths( self ): """ Return a list of paths generated by this step. 
""" return [] - def timelimit(self): - return getattr(self.p, "timelimit", None) + def timelimit( self ): + return getattr( self.p, "timelimit", None ) - def expectedResult(self): - return getattr(self.p, "expectedResult", "PASS") + def expectedResult( self ): + return getattr( self.p, "expectedResult", "PASS" ) - def handleCommonParams(self): + def handleCommonParams( self ): """ Handle all the common parameters. """ - if hasattr(self.p, "np"): + if hasattr( self.p, "np" ): if self.p.np is None: self.p.np = 1 - if hasattr(self.p, "ngpu"): + if hasattr( self.p, "ngpu" ): if self.p.ngpu is None: self.p.ngpu = 0 - if hasattr(self.p, "check"): + if hasattr( self.p, "check" ): if self.p.check is None: self.p.check = True - if hasattr(self.p, "allow_rebaseline"): + if hasattr( self.p, "allow_rebaseline" ): if self.p.allow_rebaseline is None: self.p.allow_rebaseline = True - def executable(self): + def executable( self ): """ Return the path of the executable used to execute this step. """ - raise Error("Must implement this") + raise Error( "Must implement this" ) - def rebaseline(self): + def rebaseline( self ): """ Rebaseline this test step. """ @@ -351,23 +351,23 @@ def rebaseline(self): ################################################################################ # CheckTestStepBase ################################################################################ -class CheckTestStepBase(TestStepBase): +class CheckTestStepBase( TestStepBase ): """ Base class for check test steps. """ - checkParams = (TestParam( + checkParams = ( TestParam( "enabled", "True or False. determines whether this step is enabled. 
Often times used to turn off automatic check steps", - "True"), ) + "True" ), ) - def isCheck(self): + def isCheck( self ): return True - def handleCommonParams(self): - TestStepBase.handleCommonParams(self) + def handleCommonParams( self ): + TestStepBase.handleCommonParams( self ) - if hasattr(self.p, "enabled"): + if hasattr( self.p, "enabled" ): if self.p.enabled is None: self.p.enabled = True @@ -375,7 +375,7 @@ def handleCommonParams(self): ################################################################################ # geos ################################################################################ -class geos(TestStepBase): +class geos( TestStepBase ): """ Class for the Geos test step. """ @@ -386,20 +386,20 @@ class geos(TestStepBase): command = "geosx [-i ] [-r ] [-x ] [-y ] [-z ] [-s ] [-n ] [-o ] [ --suppress-pinned ] " params = TestStepBase.defaultParams + ( - TestStepBase.commonParams["name"], TestStepBase.commonParams["deck"], TestStepBase.commonParams["np"], - TestStepBase.commonParams["ngpu"], TestStepBase.commonParams["check"], - TestStepBase.commonParams["test_directory"], TestStepBase.commonParams["baseline_directory"], - TestStepBase.commonParams["output_directory"], TestParam("restart_file", "The name of the restart file."), - TestParam("x_partitions", "The number of partitions in the x direction."), - TestParam("y_partitions", "The number of partitions in the y direction."), - TestParam("z_partitions", - "The number of partitions in the z direction."), TestParam("schema_level", "The schema level."), - TestParam("suppress-pinned", "Option to suppress use of pinned memory for MPI buffers."), - TestParam("trace_data_migration", "Trace host-device data migration.")) + TestStepBase.commonParams[ "name" ], TestStepBase.commonParams[ "deck" ], TestStepBase.commonParams[ "np" ], + TestStepBase.commonParams[ "ngpu" ], TestStepBase.commonParams[ "check" ], + TestStepBase.commonParams[ "test_directory" ], TestStepBase.commonParams[ 
"baseline_directory" ], + TestStepBase.commonParams[ "output_directory" ], TestParam( "restart_file", "The name of the restart file." ), + TestParam( "x_partitions", "The number of partitions in the x direction." ), + TestParam( "y_partitions", "The number of partitions in the y direction." ), + TestParam( "z_partitions", + "The number of partitions in the z direction." ), TestParam( "schema_level", "The schema level." ), + TestParam( "suppress-pinned", "Option to suppress use of pinned memory for MPI buffers." ), + TestParam( "trace_data_migration", "Trace host-device data migration." ) ) - checkstepnames = ["restartcheck"] + checkstepnames = [ "restartcheck" ] - def __init__(self, restartcheck_params=None, curvecheck_params=None, **kw): + def __init__( self, restartcheck_params=None, curvecheck_params=None, **kw ): """ Initializes the parameters of this test step, and creates the appropriate check steps. @@ -408,109 +408,109 @@ def __init__(self, restartcheck_params=None, curvecheck_params=None, **kw): KEYWORDS [in]: Dictionary that is used to set the parameters of this step and also all check steps. 
""" - TestStepBase.__init__(self) - self.setParams(kw, self.params) + TestStepBase.__init__( self ) + self.setParams( kw, self.params ) checkOption = self.getCheckOption() self.checksteps = [] - if checkOption in ["all", "curvecheck"]: + if checkOption in [ "all", "curvecheck" ]: if curvecheck_params is not None: - self.checksteps.append(curvecheck(curvecheck_params, **kw)) + self.checksteps.append( curvecheck( curvecheck_params, **kw ) ) - if checkOption in ["all", "restartcheck"]: + if checkOption in [ "all", "restartcheck" ]: if restartcheck_params is not None: - self.checksteps.append(restartcheck(restartcheck_params, **kw)) + self.checksteps.append( restartcheck( restartcheck_params, **kw ) ) - def label(self): + def label( self ): return "geos" - def useMPI(self): + def useMPI( self ): return True - def executable(self): + def executable( self ): # python = os.path.join(binDir, "..", "lib", "PYGEOS", "bin", "python3") # pygeosDir = os.path.join(binDir, "..", "..", "src", "pygeos") # return python + " -m mpi4py " + os.path.join( pygeosDir, "reentrantTest.py" ) # return python + " -m mpi4py " + os.path.join( pygeosDir, "test.py" ) # return config.geos_bin_dir - return os.path.join(config.geos_bin_dir, 'geosx') + return os.path.join( config.geos_bin_dir, 'geosx' ) - def update(self, dictionary): - self.setParams(dictionary, self.params) + def update( self, dictionary ): + self.setParams( dictionary, self.params ) - self.requireParam("deck") - self.requireParam("name") - self.requireParam("baseline_directory") - self.requireParam("output_directory") - self.requireParam("test_directory") + self.requireParam( "deck" ) + self.requireParam( "name" ) + self.requireParam( "baseline_directory" ) + self.requireParam( "output_directory" ) + self.requireParam( "test_directory" ) self.handleCommonParams() - self.setStdout(dictionary) + self.setStdout( dictionary ) # update all the checksteps if self.p.check: for step in self.checksteps: - step.update(dictionary) + 
step.update( dictionary ) - def insertStep(self, steps): + def insertStep( self, steps ): # the step - steps.append(self) + steps.append( self ) # the post conditions if self.p.check: for step in self.checksteps: - step.insertStep(steps) + step.insertStep( steps ) - def makeArgs(self): + def makeArgs( self ): args = [] if self.p.deck: - args += ["-i", os.path.join(self.p.test_directory, self.p.deck)] + args += [ "-i", os.path.join( self.p.test_directory, self.p.deck ) ] if self.p.restart_file: - args += ["-r", os.path.abspath(os.path.join(self.p.output_directory, '..', self.p.restart_file))] + args += [ "-r", os.path.abspath( os.path.join( self.p.output_directory, '..', self.p.restart_file ) ) ] if self.p.x_partitions: - args += ["-x", self.p.x_partitions] + args += [ "-x", self.p.x_partitions ] if self.p.y_partitions: - args += ["-y", self.p.y_partitions] + args += [ "-y", self.p.y_partitions ] if self.p.z_partitions: - args += ["-z", self.p.z_partitions] + args += [ "-z", self.p.z_partitions ] if self.p.schema_level: - args += ["-s", self.p.schema_level] + args += [ "-s", self.p.schema_level ] if self.p.name: - args += ["-n", self.p.name] + args += [ "-n", self.p.name ] if self.p.output_directory: - args += ["-o", self.p.output_directory] + args += [ "-o", self.p.output_directory ] # if self.p.ngpu == 0: if self.p.ngpu >= 0: - args += ["--suppress-pinned"] + args += [ "--suppress-pinned" ] if self.p.trace_data_migration: - args += ["--trace-data-migration"] + args += [ "--trace-data-migration" ] - return list(map(str, args)) + return list( map( str, args ) ) - def resultPaths(self): - name = getGeosProblemName(self.p.deck, self.p.name) - paths = [os.path.join(self.p.output_directory, f"{name}_restart_*")] + def resultPaths( self ): + name = getGeosProblemName( self.p.deck, self.p.name ) + paths = [ os.path.join( self.p.output_directory, f"{name}_restart_*" ) ] return paths - def clean(self): - self._clean(self.resultPaths()) + def clean( self ): + self._clean( 
self.resultPaths() ) ################################################################################ # restartcheck ################################################################################ -class restartcheck(CheckTestStepBase): +class restartcheck( CheckTestStepBase ): """ Class for the restart check test step. """ @@ -520,127 +520,129 @@ class restartcheck(CheckTestStepBase): command = """restartcheck [-r RELATIVE] [-a ABSOLUTE] [-o OUTPUT] [-e EXCLUDE [EXCLUDE ...]] [-w] file_pattern baseline_pattern""" params = TestStepBase.defaultParams + CheckTestStepBase.checkParams + ( - TestStepBase.commonParams["deck"], TestStepBase.commonParams["name"], TestStepBase.commonParams["np"], - TestStepBase.commonParams["allow_rebaseline"], TestStepBase.commonParams["baseline_directory"], - TestStepBase.commonParams["output_directory"], - TestParam("file_pattern", "Regex pattern to match file written out by geos."), - TestParam("baseline_pattern", "Regex pattern to match file to compare against."), - TestParam("rtol", - "Relative tolerance, default is 0.0."), TestParam("atol", "Absolute tolerance, default is 0.0."), - TestParam("exclude", "Regular expressions matching groups to exclude from the check, default is None."), - TestParam("warnings_are_errors", "Treat warnings as errors, default is True."), - TestParam("suppress_output", "Whether to write output to stdout, default is True."), - TestParam("skip_missing", "Whether to skip missing values in target or baseline files, default is False.")) - - def __init__(self, restartcheck_params, **kw): + TestStepBase.commonParams[ "deck" ], TestStepBase.commonParams[ "name" ], TestStepBase.commonParams[ "np" ], + TestStepBase.commonParams[ "allow_rebaseline" ], TestStepBase.commonParams[ "baseline_directory" ], + TestStepBase.commonParams[ "output_directory" ], + TestParam( "file_pattern", "Regex pattern to match file written out by geos." ), + TestParam( "baseline_pattern", "Regex pattern to match file to compare against." 
), + TestParam( "rtol", + "Relative tolerance, default is 0.0." ), TestParam( "atol", "Absolute tolerance, default is 0.0." ), + TestParam( "exclude", "Regular expressions matching groups to exclude from the check, default is None." ), + TestParam( "warnings_are_errors", "Treat warnings as errors, default is True." ), + TestParam( "suppress_output", "Whether to write output to stdout, default is True." ), + TestParam( "skip_missing", "Whether to skip missing values in target or baseline files, default is False." ) ) + + def __init__( self, restartcheck_params, **kw ): """ Set parameters with RESTARTCHECK_PARAMS and then with KEYWORDS. """ - CheckTestStepBase.__init__(self) + CheckTestStepBase.__init__( self ) self.p.warnings_are_errors = True if restartcheck_params is not None: - self.setParams(restartcheck_params, self.params) - self.setParams(kw, self.params) + self.setParams( restartcheck_params, self.params ) + self.setParams( kw, self.params ) - def label(self): + def label( self ): return "restartcheck" - def useMPI(self): + def useMPI( self ): return True - def executable(self): + def executable( self ): if self.getTestMode(): return "python -m mpi4py" else: return sys.executable + " -m mpi4py" - def update(self, dictionary): - self.setParams(dictionary, self.params) + def update( self, dictionary ): + self.setParams( dictionary, self.params ) self.handleCommonParams() - self.requireParam("deck") - self.requireParam("baseline_directory") - self.requireParam("output_directory") + self.requireParam( "deck" ) + self.requireParam( "baseline_directory" ) + self.requireParam( "output_directory" ) if self.p.file_pattern is None: - self.p.file_pattern = getGeosProblemName(self.p.deck, self.p.name) + r"_restart_[0-9]+\.root" + self.p.file_pattern = getGeosProblemName( self.p.deck, self.p.name ) + r"_restart_[0-9]+\.root" if self.p.baseline_pattern is None: self.p.baseline_pattern = self.p.file_pattern - self.restart_file_regex = os.path.join(self.p.output_directory, 
self.p.file_pattern) - self.restart_baseline_regex = os.path.join(self.p.baseline_directory, self.p.baseline_pattern) + self.restart_file_regex = os.path.join( self.p.output_directory, self.p.file_pattern ) + self.restart_baseline_regex = os.path.join( self.p.baseline_directory, self.p.baseline_pattern ) if self.p.allow_rebaseline is None: self.p.allow_rebaseline = True - def insertStep(self, steps): + def insertStep( self, steps ): if config.restartcheck_enabled and self.p.enabled: - steps.append(self) + steps.append( self ) - def makeArgs(self): - cur_dir = os.path.dirname(os.path.realpath(__file__)) - script_location = os.path.join(cur_dir, "helpers", "restart_check.py") - args = [script_location] + def makeArgs( self ): + cur_dir = os.path.dirname( os.path.realpath( __file__ ) ) + script_location = os.path.join( cur_dir, "helpers", "restart_check.py" ) + args = [ script_location ] if self.p.atol is not None: - args += ["-a", self.p.atol] + args += [ "-a", self.p.atol ] if self.p.rtol is not None: - args += ["-r", self.p.rtol] + args += [ "-r", self.p.rtol ] if self.p.warnings_are_errors: - args += ["-w"] + args += [ "-w" ] if self.p.suppress_output: - args += ["-s"] - if (self.p.skip_missing or config.restart_skip_missing): - args += ["-m"] + args += [ "-s" ] + if ( self.p.skip_missing or config.restart_skip_missing ): + args += [ "-m" ] exclude_values = config.restart_exclude_pattern if self.p.exclude is not None: - exclude_values.extend(self.p.exclude) + exclude_values.extend( self.p.exclude ) for v in exclude_values: - args += ["-e", v] + args += [ "-e", v ] - args += [self.restart_file_regex, self.restart_baseline_regex] - return list(map(str, args)) + args += [ self.restart_file_regex, self.restart_baseline_regex ] + return list( map( str, args ) ) - def rebaseline(self): + def rebaseline( self ): if not self.p.allow_rebaseline: - Log("Rebaseline not allowed for restartcheck of %s." % self.p.name) + Log( "Rebaseline not allowed for restartcheck of %s." 
% self.p.name ) return - root_file_path = findMaxMatchingFile(self.restart_file_regex) + root_file_path = findMaxMatchingFile( self.restart_file_regex ) if root_file_path is None: - raise IOError("File not found matching the pattern %s in directory %s." % - (self.restart_file_regex, os.getcwd())) + raise IOError( "File not found matching the pattern %s in directory %s." % + ( self.restart_file_regex, os.getcwd() ) ) - baseline_directory = os.path.dirname(self.restart_baseline_regex) - root_baseline_path = findMaxMatchingFile(self.restart_baseline_regex) + baseline_directory = os.path.dirname( self.restart_baseline_regex ) + root_baseline_path = findMaxMatchingFile( self.restart_baseline_regex ) if root_baseline_path is not None: # Delete the baseline root file. - os.remove(root_baseline_path) + os.remove( root_baseline_path ) # Delete the directory holding the baseline data files. - data_dir_path = os.path.splitext(root_baseline_path)[0] - shutil.rmtree(data_dir_path) + data_dir_path = os.path.splitext( root_baseline_path )[ 0 ] + shutil.rmtree( data_dir_path ) else: - os.makedirs(baseline_directory, exist_ok=True) + os.makedirs( baseline_directory, exist_ok=True ) # Copy the root file into the baseline directory. - shutil.copy2(root_file_path, os.path.join(baseline_directory, os.path.basename(root_file_path))) + shutil.copy2( root_file_path, os.path.join( baseline_directory, os.path.basename( root_file_path ) ) ) # Copy the directory holding the data files into the baseline directory. 
- data_dir_path = os.path.splitext(root_file_path)[0] - shutil.copytree(data_dir_path, os.path.join(baseline_directory, os.path.basename(data_dir_path))) + data_dir_path = os.path.splitext( root_file_path )[ 0 ] + shutil.copytree( data_dir_path, os.path.join( baseline_directory, os.path.basename( data_dir_path ) ) ) - def resultPaths(self): - return [os.path.join(self.p.output_directory, "%s.restartcheck" % os.path.splitext(self.p.file_pattern)[0])] + def resultPaths( self ): + return [ + os.path.join( self.p.output_directory, "%s.restartcheck" % os.path.splitext( self.p.file_pattern )[ 0 ] ) + ] - def clean(self): - self._clean(self.resultPaths()) + def clean( self ): + self._clean( self.resultPaths() ) ################################################################################ # curvecheck ################################################################################ -class curvecheck(CheckTestStepBase): +class curvecheck( CheckTestStepBase ): """ Class for the curve check test step. 
""" @@ -650,184 +652,184 @@ class curvecheck(CheckTestStepBase): command = """curve_check.py [-h] [-c CURVE [CURVE ...]] [-t TOLERANCE] [-w] [-o OUTPUT] [-n N_COLUMN] [-u {milliseconds,seconds,minutes,hours,days,years}] filename baseline""" params = TestStepBase.defaultParams + CheckTestStepBase.checkParams + ( - TestStepBase.commonParams["deck"], TestStepBase.commonParams["name"], TestStepBase.commonParams["np"], - TestStepBase.commonParams["allow_rebaseline"], TestStepBase.commonParams["baseline_directory"], - TestStepBase.commonParams["output_directory"], - TestParam("filename", "Name of the target curve file written by GEOS."), - TestParam("curves", "A list of parameter, setname value pairs."), + TestStepBase.commonParams[ "deck" ], TestStepBase.commonParams[ "name" ], TestStepBase.commonParams[ "np" ], + TestStepBase.commonParams[ "allow_rebaseline" ], TestStepBase.commonParams[ "baseline_directory" ], + TestStepBase.commonParams[ "output_directory" ], + TestParam( "filename", "Name of the target curve file written by GEOS." ), + TestParam( "curves", "A list of parameter, setname value pairs." ), TestParam( "tolerance", "Curve check tolerance (||x-y||/N), can be specified as a single value or a list of floats corresponding to the curves." - ), TestParam("warnings_are_errors", "Treat warnings as errors, default is True."), - TestParam("script_instructions", "A list of (path, function, value, setname) entries"), - TestParam("time_units", "Time units to use for plots.")) + ), TestParam( "warnings_are_errors", "Treat warnings as errors, default is True." ), + TestParam( "script_instructions", "A list of (path, function, value, setname) entries" ), + TestParam( "time_units", "Time units to use for plots." ) ) - def __init__(self, curvecheck_params, **kw): + def __init__( self, curvecheck_params, **kw ): """ Set parameters with CURVECHECK_PARAMS and then with KEYWORDS. 
""" - CheckTestStepBase.__init__(self) + CheckTestStepBase.__init__( self ) self.p.warnings_are_errors = True if curvecheck_params is not None: c = curvecheck_params.copy() - Nc = len(c.get('curves', [])) + Nc = len( c.get( 'curves', [] ) ) # Note: ats seems to store list/tuple parameters incorrectly # Convert these to strings - for k in ['curves', 'script_instructions']: + for k in [ 'curves', 'script_instructions' ]: if k in c: - if isinstance(c[k], (list, tuple)): - c[k] = ';'.join([','.join(c) for c in c[k]]) + if isinstance( c[ k ], ( list, tuple ) ): + c[ k ] = ';'.join( [ ','.join( c ) for c in c[ k ] ] ) # Check whether tolerance was specified as a single float, list # and then convert into a comma-delimited string - tol = c.get('tolerance', 0.0) - if isinstance(tol, (float, int)): - tol = [tol] * Nc - c['tolerance'] = ','.join([str(x) for x in tol]) + tol = c.get( 'tolerance', 0.0 ) + if isinstance( tol, ( float, int ) ): + tol = [ tol ] * Nc + c[ 'tolerance' ] = ','.join( [ str( x ) for x in tol ] ) - self.setParams(c, self.params) - self.setParams(kw, self.params) + self.setParams( c, self.params ) + self.setParams( kw, self.params ) - def label(self): + def label( self ): return "curvecheck" - def useMPI(self): + def useMPI( self ): return True - def executable(self): + def executable( self ): if self.getTestMode(): return "python" else: return sys.executable - def update(self, dictionary): - self.setParams(dictionary, self.params) + def update( self, dictionary ): + self.setParams( dictionary, self.params ) self.handleCommonParams() - self.requireParam("deck") - self.requireParam("baseline_directory") - self.requireParam("output_directory") + self.requireParam( "deck" ) + self.requireParam( "baseline_directory" ) + self.requireParam( "output_directory" ) - self.baseline_file = os.path.join(self.p.baseline_directory, self.p.filename) - self.target_file = os.path.join(self.p.output_directory, self.p.filename) - self.figure_root = 
os.path.join(self.p.output_directory, 'curve_check') + self.baseline_file = os.path.join( self.p.baseline_directory, self.p.filename ) + self.target_file = os.path.join( self.p.output_directory, self.p.filename ) + self.figure_root = os.path.join( self.p.output_directory, 'curve_check' ) if self.p.allow_rebaseline is None: self.p.allow_rebaseline = True - def insertStep(self, steps): + def insertStep( self, steps ): if config.restartcheck_enabled and self.p.enabled: - steps.append(self) + steps.append( self ) - def makeArgs(self): - cur_dir = os.path.dirname(os.path.realpath(__file__)) - script_location = os.path.join(cur_dir, "helpers", "curve_check.py") - args = [script_location] + def makeArgs( self ): + cur_dir = os.path.dirname( os.path.realpath( __file__ ) ) + script_location = os.path.join( cur_dir, "helpers", "curve_check.py" ) + args = [ script_location ] if self.p.curves is not None: - for c in self.p.curves.split(';'): - args += ["-c"] - args += c.split(',') + for c in self.p.curves.split( ';' ): + args += [ "-c" ] + args += c.split( ',' ) if self.p.tolerance is not None: - for t in self.p.tolerance.split(','): - args += ["-t", t] + for t in self.p.tolerance.split( ',' ): + args += [ "-t", t ] if self.p.time_units is not None: - args += ["-u", self.p.time_units] + args += [ "-u", self.p.time_units ] if self.p.script_instructions is not None: - for c in self.p.script_instructions.split(';'): - args += ["-s"] - args += c.split(',') + for c in self.p.script_instructions.split( ';' ): + args += [ "-s" ] + args += c.split( ',' ) if self.p.warnings_are_errors: - args += ["-w"] + args += [ "-w" ] - args += ['-o', self.figure_root] - args += [self.target_file, self.baseline_file] - return list(map(str, args)) + args += [ '-o', self.figure_root ] + args += [ self.target_file, self.baseline_file ] + return list( map( str, args ) ) - def rebaseline(self): + def rebaseline( self ): if not self.p.allow_rebaseline: - Log("Rebaseline not allowed for curvecheck of %s." 
% self.p.name) + Log( "Rebaseline not allowed for curvecheck of %s." % self.p.name ) return - baseline_directory = os.path.split(self.baseline_file)[0] - os.makedirs(baseline_directory, exist_ok=True) - shutil.copyfile(self.target_file, self.baseline_file) + baseline_directory = os.path.split( self.baseline_file )[ 0 ] + os.makedirs( baseline_directory, exist_ok=True ) + shutil.copyfile( self.target_file, self.baseline_file ) - def resultPaths(self): - return [self.target_file, os.path.join(self.figure_root, '*.png')] + def resultPaths( self ): + return [ self.target_file, os.path.join( self.figure_root, '*.png' ) ] - def clean(self): - self._clean(self.resultPaths()) + def clean( self ): + self._clean( self.resultPaths() ) -def infoTestStepParams(params, maxwidth=None): +def infoTestStepParams( params, maxwidth=None ): if maxwidth is None: - maxwidth = max(10, max([len(p.name) for p in params])) + maxwidth = max( 10, max( [ len( p.name ) for p in params ] ) ) for p in params: paramdoc = p.doc if p.default is not None: - paramdoc += " (default = %s)" % (p.default) - paramdoc = textwrap.wrap(paramdoc, width=100 - maxwidth) - logger.debug(" %*s:" % (maxwidth, p.name), paramdoc[0].strip()) - for line in paramdoc[1:]: - logger.debug(" %*s %s" % (maxwidth, "", line.strip())) + paramdoc += " (default = %s)" % ( p.default ) + paramdoc = textwrap.wrap( paramdoc, width=100 - maxwidth ) + logger.debug( " %*s:" % ( maxwidth, p.name ), paramdoc[ 0 ].strip() ) + for line in paramdoc[ 1: ]: + logger.debug( " %*s %s" % ( maxwidth, "", line.strip() ) ) -def infoTestStep(stepname): - topic = common_utilities.InfoTopic(stepname) +def infoTestStep( stepname ): + topic = common_utilities.InfoTopic( stepname ) topic.startBanner() - logger.debug(f"TestStep: {stepname}") - stepclass = globals()[stepname] - if not hasattr(stepclass, "doc"): + logger.debug( f"TestStep: {stepname}" ) + stepclass = globals()[ stepname ] + if not hasattr( stepclass, "doc" ): return - 
logger.debug("Description:") - doc = textwrap.dedent(stepclass.doc) - doc = textwrap.wrap(doc, width=100) + logger.debug( "Description:" ) + doc = textwrap.dedent( stepclass.doc ) + doc = textwrap.wrap( doc, width=100 ) for line in doc: - logger.debug(" ", line.strip()) + logger.debug( " ", line.strip() ) - logger.debug("Command:") - doc = textwrap.dedent(stepclass.command) - doc = textwrap.wrap(doc, width=100) - logger.debug(f" {doc[0].strip()}") - for line in doc[1:]: - logger.debug(f'\\\n {" " * len(stepname)} {line}') + logger.debug( "Command:" ) + doc = textwrap.dedent( stepclass.command ) + doc = textwrap.wrap( doc, width=100 ) + logger.debug( f" {doc[0].strip()}" ) + for line in doc[ 1: ]: + logger.debug( f'\\\n {" " * len(stepname)} {line}' ) # compute max param width: - allparams = [p.name for p in stepclass.params] - if hasattr(stepclass, "checkstepnames"): + allparams = [ p.name for p in stepclass.params ] + if hasattr( stepclass, "checkstepnames" ): for checkstep in stepclass.checkstepnames: - checkclass = globals()[checkstep] - if not hasattr(checkclass, "doc"): + checkclass = globals()[ checkstep ] + if not hasattr( checkclass, "doc" ): continue - allparams.extend([p.name for p in checkclass.params]) - maxwidth = max(10, max([len(p) for p in allparams])) + allparams.extend( [ p.name for p in checkclass.params ] ) + maxwidth = max( 10, max( [ len( p ) for p in allparams ] ) ) - logger.debug("Parameters:") - infoTestStepParams(stepclass.params, maxwidth) + logger.debug( "Parameters:" ) + infoTestStepParams( stepclass.params, maxwidth ) - paramset = set([p.name for p in stepclass.params]) + paramset = set( [ p.name for p in stepclass.params ] ) - if hasattr(stepclass, "checkstepnames"): + if hasattr( stepclass, "checkstepnames" ): for checkstep in stepclass.checkstepnames: - logger.debug(f"CheckStep: {checkstep}") + logger.debug( f"CheckStep: {checkstep}" ) checkparams = [] - checkclass = globals()[checkstep] - if not hasattr(checkclass, "doc"): + 
checkclass = globals()[ checkstep ] + if not hasattr( checkclass, "doc" ): continue for p in checkclass.params: if p.name not in paramset: - checkparams.append(p) + checkparams.append( p ) - infoTestStepParams(checkparams, maxwidth) + infoTestStepParams( checkparams, maxwidth ) topic.endBanner() -def infoTestSteps(*args): +def infoTestSteps( *args ): """This function is used to print documentation about the teststeps to stdout""" # get the list of step classes @@ -835,39 +837,39 @@ def infoTestSteps(*args): checkstepnames = [] for k, v in globals().items(): - if not isinstance(v, type): + if not isinstance( v, type ): continue - if v in (CheckTestStepBase, TestStepBase): + if v in ( CheckTestStepBase, TestStepBase ): continue try: - if issubclass(v, CheckTestStepBase): - checkstepnames.append(k) - elif issubclass(v, TestStepBase): - steps.append(k) + if issubclass( v, CheckTestStepBase ): + checkstepnames.append( k ) + elif issubclass( v, TestStepBase ): + steps.append( k ) except TypeError as e: - logger.debug(e) + logger.debug( e ) - steps = sorted(steps) - checkstepnames = sorted(checkstepnames) + steps = sorted( steps ) + checkstepnames = sorted( checkstepnames ) steps = steps + checkstepnames def all(): for s in steps: - infoTestStep(s) + infoTestStep( s ) - topic = common_utilities.InfoTopic("teststep") - topic.addTopic("all", "full info on all the teststeps", all) + topic = common_utilities.InfoTopic( "teststep" ) + topic.addTopic( "all", "full info on all the teststeps", all ) for s in steps: - stepclass = globals()[s] - doc = getattr(stepclass, "doc", None) - topic.addTopic(s, textwrap.dedent(doc).strip(), lambda ss=s: infoTestStep(ss)) + stepclass = globals()[ s ] + doc = getattr( stepclass, "doc", None ) + topic.addTopic( s, textwrap.dedent( doc ).strip(), lambda ss=s: infoTestStep( ss ) ) - topic.process(args) + topic.process( args ) # Register test step definitions -ats.manager.define(geos=geos) -ats.manager.define(restartcheck=restartcheck) 
-ats.manager.define(config=config) +ats.manager.define( geos=geos ) +ats.manager.define( restartcheck=restartcheck ) +ats.manager.define( config=config ) From cf298026c8cd60b94f0feae164b6217aea3af483 Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Mon, 29 Jan 2024 15:22:26 -0800 Subject: [PATCH 08/14] Using dataclass in geos_ats.reporting --- geos_ats_package/geos_ats/reporting.py | 126 +++++++++++++++---------- 1 file changed, 75 insertions(+), 51 deletions(-) diff --git a/geos_ats_package/geos_ats/reporting.py b/geos_ats_package/geos_ats/reporting.py index a5b0199..f339449 100644 --- a/geos_ats_package/geos_ats/reporting.py +++ b/geos_ats_package/geos_ats/reporting.py @@ -6,6 +6,9 @@ from tabulate import tabulate import glob import logging +from collections.abc import Mapping +from dataclasses import dataclass +from ats import atsut from ats.times import hms from ats import ( PASSED, FAILED, TIMEDOUT, EXPECTED, BATCHED, FILTERED, SKIPPED, CREATED, RUNNING, HALTED, LSFERROR ) @@ -15,18 +18,44 @@ # Status value in priority order STATUS = ( EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED, RUNNING, PASSED, TIMEDOUT, HALTED, LSFERROR, FAILED ) -COLORS = {} -COLORS[ EXPECTED.name ] = "black" -COLORS[ CREATED.name ] = "black" -COLORS[ BATCHED.name ] = "black" -COLORS[ FILTERED.name ] = "black" -COLORS[ SKIPPED.name ] = "orange" -COLORS[ RUNNING.name ] = "blue" -COLORS[ PASSED.name ] = "green" -COLORS[ TIMEDOUT.name ] = "red" -COLORS[ HALTED.name ] = "brown" -COLORS[ LSFERROR.name ] = "brown" -COLORS[ FAILED.name ] = "red" +COLORS: Mapping[ str, str ] = { + EXPECTED.name: "black", + CREATED.name: "black", + BATCHED.name: "black", + FILTERED.name: "black", + SKIPPED.name: "orange", + RUNNING.name: "blue", + PASSED.name: "green", + TIMEDOUT.name: "red", + HALTED.name: "brown", + LSFERROR.name: "brown", + FAILED.name: "red", +} + + +@dataclass( frozen=True ) +class TestStepRecord: + status: atsut._StatusCode + log: str + output: list + number: int + elapsed: float + 
+ +@dataclass( frozen=True ) +class TestCaseRecord: + steps: dict + status: atsut._StatusCode + test_number: int + elapsed: float + current_step: str + resources: int + + +@dataclass( frozen=True ) +class TestGroupRecord: + tests: list + status: atsut._StatusCode def max_status( sa, sb ): @@ -52,43 +81,39 @@ def __init__( self, test_steps ): # Save data if test_name not in self.test_results: - self.test_results[ test_name ] = { - 'steps': {}, - 'status': EXPECTED, - 'id': test_id, - 'elapsed': 0.0, - 'current_step': ' ', - 'resources': t.np - } - self.test_results[ test_name ][ 'steps' ][ t.name ] = { - 'status': t.status, - 'log': t.outname, - 'output': t.step_outputs, - 'number': t.groupSerialNumber - } + self.test_results[ test_name ] = TestCaseRecord( steps={}, + status=EXPECTED, + test_number=test_id, + elapsed=0.0, + current_step=' ', + resources=t.np ) # Check elapsed time elapsed = 0.0 if hasattr( t, 'endTime' ): elapsed = t.endTime - t.startTime - self.test_results[ test_name ][ 'steps' ][ t.name ][ 'elapsed' ] = elapsed - self.test_results[ test_name ][ 'elapsed' ] += elapsed + self.test_results[ test_name ].elapsed += elapsed + + # Add the step + self.test_results[ test_name ].steps[ t.name ] = TestStepRecord( status=t.status, + log=t.outname, + output=t.step_outputs, + number=t.groupSerialNumber, + elapsed=elapsed ) # Check the status and the latest step - self.test_results[ test_name ][ 'status' ] = max_status( t.status, - self.test_results[ test_name ][ 'status' ] ) + self.test_results[ test_name ].status = max_status( t.status, self.test_results[ test_name ].status ) if t.status not in ( EXPECTED, CREATED, BATCHED, FILTERED, SKIPPED ): - self.test_results[ test_name ][ 'current_step' ] = t.name + self.test_results[ test_name ].current_step = t.name if group_name not in self.test_groups: - self.test_groups[ group_name ] = { 'tests': [], 'status': EXPECTED } - self.test_groups[ group_name ][ 'tests' ].append( test_name ) - self.test_groups[ group_name 
][ 'status' ] = max_status( t.status, - self.test_groups[ group_name ][ 'status' ] ) + self.test_groups[ group_name ] = TestGroupRecord( tests=[], status=EXPECTED ) + self.test_groups[ group_name ].tests.append( test_name ) + self.test_groups[ group_name ].status = max_status( t.status, self.test_groups[ group_name ].status ) # Collect status names for s in STATUS: - self.status_lists[ s.name ] = [ k for k, v in self.test_results.items() if v[ 'status' ] == s ] + self.status_lists[ s.name ] = [ k for k, v in self.test_results.items() if v.status == s ] self.html_filename = config.report_html_file @@ -103,7 +128,8 @@ def report( self, fp ): configParser.set( "Info", "Time", time.strftime( "%a, %d %b %Y %H:%M:%S" ) ) try: platform = socket.gethostname() - except: + except Exception as e: + logger.debug( str( e ) ) logger.debug( "Could not get host name" ) platform = "unknown" configParser.set( "Info", "Platform", platform ) @@ -209,7 +235,8 @@ def writeHeader( self, sp, refresh ): # Notations: try: platform = socket.gethostname() - except: + except Exception as e: + logger.debug( str( e ) ) logger.debug( "Could not get host name" ) platform = "unknown" @@ -248,25 +275,22 @@ def writeTable( self, sp ): color_pattern = "

{}

" for k, v in self.test_results.items(): - status_str = v[ 'status' ].name + status_str = v.status.name status_formatted = color_pattern.format( COLORS[ status_str ], k, status_str ) - step_shortname = v[ 'current_step' ] - elapsed_formatted = hms( v[ 'elapsed' ] ) + step_shortname = v.current_step + elapsed_formatted = hms( v.elapsed ) output_files = [] - for s in v[ 'steps' ].values(): - if os.path.isfile( s[ 'log' ] ): - output_files.append( file_pattern.format( s[ 'log' ], os.path.basename( s[ 'log' ] ) ) ) - if os.path.isfile( s[ 'log' ] + '.err' ): - output_files.append( - file_pattern.format( s[ 'log' ] + '.err', os.path.basename( s[ 'log' ] + '.err' ) ) ) - for pattern in s[ 'output' ]: + for s in v.steps.values(): + if os.path.isfile( s.log ): + output_files.append( file_pattern.format( s.log, os.path.basename( s.log ) ) ) + if os.path.isfile( s.log + '.err' ): + output_files.append( file_pattern.format( s.log + '.err', os.path.basename( s.log + '.err' ) ) ) + for pattern in s.output: for f in sorted( glob.glob( pattern ) ): if ( ( 'restart' not in f ) or ( '.restartcheck' in f ) ) and os.path.isfile( f ): output_files.append( file_pattern.format( f, os.path.basename( f ) ) ) - row = [ - status_formatted, k, step_shortname, elapsed_formatted, v[ 'resources' ], ', '.join( output_files ) - ] + row = [ status_formatted, k, step_shortname, elapsed_formatted, v.resources, ', '.join( output_files ) ] if status_str == 'FILTERED': table_filt.append( row ) else: From 72e6ef2a982819792197fe635153b8fafbf31b4a Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Mon, 29 Jan 2024 16:53:34 -0800 Subject: [PATCH 09/14] Fixing ats environment setup --- geos_ats_package/geos_ats/environment_setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/geos_ats_package/geos_ats/environment_setup.py b/geos_ats_package/geos_ats/environment_setup.py index 698132f..47ced31 100644 --- a/geos_ats_package/geos_ats/environment_setup.py +++ 
b/geos_ats_package/geos_ats/environment_setup.py @@ -16,7 +16,8 @@ def setup_ats( src_path, build_path, baseline_dir, working_dir, ats_xargs, ats_m ats_main_file = os.path.abspath( os.path.join( src_path, 'inputFiles', 'main.ats' ) ) # Create a symbolic link to working directory - os.makedirs( working_dir, exist_ok=True ) + for d in [ baseline_dir, working_dir, test_path ]: + os.makedirs( d, exist_ok=True ) if os.path.islink( link_path ): print( 'integratedTests symlink already exists' ) else: From 7fa6add13551c28e929ac36d10f6e755a6b56d0e Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Thu, 1 Feb 2024 10:50:41 -0800 Subject: [PATCH 10/14] Updating the reporting dataclass for geos_ats --- geos_ats_package/geos_ats/reporting.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/geos_ats_package/geos_ats/reporting.py b/geos_ats_package/geos_ats/reporting.py index f339449..64bf328 100644 --- a/geos_ats_package/geos_ats/reporting.py +++ b/geos_ats_package/geos_ats/reporting.py @@ -33,7 +33,7 @@ } -@dataclass( frozen=True ) +@dataclass class TestStepRecord: status: atsut._StatusCode log: str @@ -42,7 +42,7 @@ class TestStepRecord: elapsed: float -@dataclass( frozen=True ) +@dataclass class TestCaseRecord: steps: dict status: atsut._StatusCode @@ -52,7 +52,7 @@ class TestCaseRecord: resources: int -@dataclass( frozen=True ) +@dataclass class TestGroupRecord: tests: list status: atsut._StatusCode From 98f12048c55b87de4049ec920c81a81672e1707f Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Thu, 1 Feb 2024 15:08:16 -0800 Subject: [PATCH 11/14] Fixing machine search bug --- geos_ats_package/geos_ats/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/geos_ats_package/geos_ats/main.py b/geos_ats_package/geos_ats/main.py index 5466012..42e43c1 100644 --- a/geos_ats_package/geos_ats/main.py +++ b/geos_ats_package/geos_ats/main.py @@ -5,7 +5,7 @@ import subprocess import time import logging -from geos_ats import 
command_line_parsers, test_builder +from geos_ats import command_line_parsers test_actions = ( "run", "rerun", "check", "continue" ) report_actions = ( "run", "rerun", "report", "continue" ) @@ -352,7 +352,7 @@ def main(): else: ats.AtsTest.glue( testcases="all" ) - from geos_ats import ( common_utilities, suite_settings, test_case, test_steps ) + from geos_ats import ( common_utilities, suite_settings, test_case, test_steps, test_builder ) # Set ats options append_geos_ats_summary( ats.manager ) From 2fab13523143cd03a15b25eb1a4af8976188efd9 Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Thu, 1 Feb 2024 16:55:27 -0800 Subject: [PATCH 12/14] Adding an error for tests without restart or curve checks --- geos_ats_package/geos_ats/test_steps.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/geos_ats_package/geos_ats/test_steps.py b/geos_ats_package/geos_ats/test_steps.py index ed0409a..7f09908 100644 --- a/geos_ats_package/geos_ats/test_steps.py +++ b/geos_ats_package/geos_ats/test_steps.py @@ -421,6 +421,9 @@ def __init__( self, restartcheck_params=None, curvecheck_params=None, **kw ): if restartcheck_params is not None: self.checksteps.append( restartcheck( restartcheck_params, **kw ) ) + if not self.checksteps: + raise Exception( f'This test does not have a restart or curve check enabled: {self.p.deck}' ) + def label( self ): return "geos" From 8fbeb459d845440dd5a02658d5c70206894bc248 Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Fri, 2 Feb 2024 10:38:24 -0800 Subject: [PATCH 13/14] Fixing curve check paths --- .../geos_ats/helpers/curve_check.py | 25 ++++++++----------- geos_ats_package/geos_ats/test_steps.py | 12 +++++++-- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/geos_ats_package/geos_ats/helpers/curve_check.py b/geos_ats_package/geos_ats/helpers/curve_check.py index bdf2213..1353f62 100644 --- a/geos_ats_package/geos_ats/helpers/curve_check.py +++ b/geos_ats_package/geos_ats/helpers/curve_check.py @@ -275,20 
+275,17 @@ def compare_time_history_curves( fname, baseline, curve, tolerance, output, outp # Generate script-based curve if script_instructions and ( len( data ) > 0 ): data[ 'script' ] = {} - try: - for script, fn, p, s in script_instructions: - k = location_strings[ p ] - data[ 'script' ][ f'{p} Time' ] = data[ 'target' ][ f'{p} Time' ] - key = f'{p} {k}' - key2 = f'{p}' - if s != DEFAULT_SET_NAME: - key += f' {s}' - key2 += f' {s}' - data[ 'script' ][ key ] = data[ 'target' ][ key ] - data[ 'script' ][ key2 ] = evaluate_external_script( script, fn, data[ 'target' ] ) - data_sizes[ p ][ s ][ 'script' ] = list( np.shape( data[ 'script' ][ key2 ] ) ) - except Exception as e: - errors.append( str( e ) ) + for script, fn, p, s in script_instructions: + k = location_strings[ p ] + data[ 'script' ][ f'{p} Time' ] = data[ 'target' ][ f'{p} Time' ] + key = f'{p} {k}' + key2 = f'{p}' + if s != DEFAULT_SET_NAME: + key += f' {s}' + key2 += f' {s}' + data[ 'script' ][ key ] = data[ 'target' ][ key ] + data[ 'script' ][ key2 ] = evaluate_external_script( script, fn, data[ 'target' ] ) + data_sizes[ p ][ s ][ 'script' ] = list( np.shape( data[ 'script' ][ key2 ] ) ) # Reshape data if necessary so that they have a predictable number of dimensions for k in data.keys(): diff --git a/geos_ats_package/geos_ats/test_steps.py b/geos_ats_package/geos_ats/test_steps.py index 7f09908..54f7a2f 100644 --- a/geos_ats_package/geos_ats/test_steps.py +++ b/geos_ats_package/geos_ats/test_steps.py @@ -657,7 +657,7 @@ class curvecheck( CheckTestStepBase ): params = TestStepBase.defaultParams + CheckTestStepBase.checkParams + ( TestStepBase.commonParams[ "deck" ], TestStepBase.commonParams[ "name" ], TestStepBase.commonParams[ "np" ], TestStepBase.commonParams[ "allow_rebaseline" ], TestStepBase.commonParams[ "baseline_directory" ], - TestStepBase.commonParams[ "output_directory" ], + TestStepBase.commonParams[ "output_directory" ], TestStepBase.commonParams[ "test_directory" ], TestParam( 
"filename", "Name of the target curve file written by GEOS." ), TestParam( "curves", "A list of parameter, setname value pairs." ), TestParam( @@ -713,6 +713,7 @@ def update( self, dictionary ): self.requireParam( "deck" ) self.requireParam( "baseline_directory" ) self.requireParam( "output_directory" ) + self.requireParam( "test_directory" ) self.baseline_file = os.path.join( self.p.baseline_directory, self.p.filename ) self.target_file = os.path.join( self.p.output_directory, self.p.filename ) @@ -742,7 +743,14 @@ def makeArgs( self ): if self.p.script_instructions is not None: for c in self.p.script_instructions.split( ';' ): args += [ "-s" ] - args += c.split( ',' ) + + # Split the args and set the absolute script + tmp = c.split( ',' ) + tmp[ 0 ] = os.path.abspath( os.path.join( self.p.test_directory, tmp[ 0 ] ) ) + if not os.path.isfile( tmp[ 0 ] ): + raise FileNotFoundError( f"Could not find requested script for curve check: {tmp[0]}" ) + + args += tmp if self.p.warnings_are_errors: args += [ "-w" ] From 6f0da10210ab848b5ad0b688ce06ab57ea204dfd Mon Sep 17 00:00:00 2001 From: Christopher Sherman Date: Fri, 2 Feb 2024 14:55:58 -0800 Subject: [PATCH 14/14] Adding duplicate test check --- geos_ats_package/geos_ats/test_case.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/geos_ats_package/geos_ats/test_case.py b/geos_ats_package/geos_ats/test_case.py index 1bcee3e..fbeeb03 100644 --- a/geos_ats_package/geos_ats/test_case.py +++ b/geos_ats_package/geos_ats/test_case.py @@ -14,6 +14,8 @@ testif = ats.manager.testif logger = logging.getLogger( 'geos_ats' ) +all_test_names = [] + class Batch( object ): """A class to represent batch options""" @@ -51,7 +53,12 @@ def __init__( self, name, desc, label=None, labels=None, steps=[], **kw ): raise Exception( e ) def initialize( self, name, desc, label=None, labels=None, steps=[], batch=Batch( enabled=False ), **kw ): + # Check for duplicate tests + if name in all_test_names: + raise Exception( f'Found multiple 
tests with the same name ({name})' ) + all_test_names.append( name ) + # Setup the test self.name = name self.desc = desc self.batch = batch