diff --git a/params.py b/params.py
index 73e65d4..0f9e731 100644
--- a/params.py
+++ b/params.py
@@ -1,6 +1,4 @@
-try: import ConfigParser as configparser
-except ImportError:
-    import configparser # python 3
+import configparser
 
 import getpass
 import os
@@ -16,19 +14,27 @@ def convert_type(string):
     if string is None:
         return None
 
-    try: int(string)
-    except: pass
-    else: return int(string)
+    try:
+        int(string)
+    except:
+        pass
+    else:
+        return int(string)
 
-    try: float(string)
-    except: pass
-    else: return float(string)
+    try:
+        float(string)
+    except:
+        pass
+    else:
+        return float(string)
 
     return string.strip()
 
 def safe_get(cp, sec, opt, default=None):
-    try: v = cp.get(sec, opt)
-    except: v = default
+    try:
+        v = cp.get(sec, opt)
+    except:
+        v = default
     return v
 
 def load_params(args):
@@ -52,8 +58,9 @@ def load_params(args):
 
     if not os.path.exists(args.input_file[0]):
         raise OSError(f"Parameter file {args.input_file[0]} does not exist")
-
-    try: cp.read(args.input_file[0])
+
+    try:
+        cp.read(args.input_file[0])
     except:
         log.fail(f"ERROR: unable to read parameter file {args.input_file[0]}")
@@ -120,7 +127,8 @@ def load_params(args):
 
     # now all the other build and source directories
     other_srcs = [s for s in cp.sections() if s.startswith("extra-")]
-    if not mysuite.sourceTree in ["AMReX", "amrex"]: other_srcs.append("source")
+    if mysuite.sourceTree not in ["AMReX", "amrex"]:
+        other_srcs.append("source")
 
     for s in other_srcs:
         if s.startswith("extra-"):
@@ -134,7 +142,8 @@ def load_params(args):
 
         build = convert_type(safe_get(cp, s, "build", default=0))
 
-        if s == "source": build = 1
+        if s == "source":
+            build = 1
 
         comp_string = safe_get(cp, s, "comp_string")
@@ -165,14 +174,14 @@ def load_params(args):
 
     # now flesh out the compile strings -- they may refer to either themselves
    # or the source dir
-    for r in mysuite.repos.keys():
+    for r in mysuite.repos:
         s = mysuite.repos[r].comp_string
         if not s is None:
             mysuite.repos[r].comp_string = \
                 s.replace("@self@", mysuite.repos[r].dir).replace("@source@", mysuite.repos["source"].dir)
 
     # the suite needs to know any ext_src_comp_string
-    for r in mysuite.repos.keys():
+    for r in mysuite.repos:
         if not mysuite.repos[r].build == 1:
             if not mysuite.repos[r].comp_string is None:
                 mysuite.extra_src_comp_string += f" {mysuite.repos[r].comp_string} "
@@ -197,7 +206,8 @@ def load_params(args):
             mysuite.slack_post = 0
         else:
             print(mysuite.slack_webhookfile)
-            try: f = open(mysuite.slack_webhookfile)
+            try:
+                f = open(mysuite.slack_webhookfile)
             except:
                 mysuite.log.warn("unable to open webhook file")
                 mysuite.slack_post = 0
@@ -212,7 +222,8 @@ def load_params(args):
 
     # Make sure the web dir is valid
     if not os.path.isdir(mysuite.webTopDir):
-        try: os.mkdir(mysuite.webTopDir)
+        try:
+            os.mkdir(mysuite.webTopDir)
         except:
             mysuite.log.fail("ERROR: unable to create the web directory: {}\n".format(
                 mysuite.webTopDir))
@@ -223,7 +234,8 @@ def load_params(args):
     for sec in cp.sections():
 
-        if sec in ["main", "AMReX", "source"] or sec.startswith("extra-"): continue
+        if sec in ["main", "AMReX", "source"] or sec.startswith("extra-"):
+            continue
 
         # maximum test name length -- used for HTML formatting
         mysuite.lenTestName = max(mysuite.lenTestName, len(sec))
@@ -252,17 +264,17 @@ def load_params(args):
                 else:
                     # generic setting of the object attribute
                     setattr(mytest, opt, value)
-
+
             elif aux_pat.match(opt):
-
+
                 mytest.auxFiles.append(value)
-
+
             elif link_pat.match(opt):
-
+
                 mytest.linkFiles.append(value)
 
             else:
-
+
                 mysuite.log.warn(f"unrecognized parameter {opt} for test {sec}")
@@ -284,7 +296,7 @@ def load_params(args):
                 invalid = 1
 
         else:
-
+
             input_file_invalid = mytest.inputFile == "" and not mytest.run_as_script
             if mytest.buildDir == "" or input_file_invalid or mytest.dim == -1:
                 warn_msg = [f"required params for test {sec} not set",
@@ -329,7 +341,7 @@ def load_params(args):
 
     # if any runs are parallel, make sure that the MPIcommand is defined
-    any_MPI = any([t.useMPI for t in test_list])
+    any_MPI = any(t.useMPI for t in test_list)
     if any_MPI and mysuite.MPIcommand == "":
         mysuite.log.fail("ERROR: some tests are MPI parallel, but MPIcommand not defined")
 
diff --git a/regtest.py b/regtest.py
index c592dd1..7720aa6 100755
--- a/regtest.py
+++ b/regtest.py
@@ -38,7 +38,7 @@ def _check_safety(cs):
 
 def check_realclean_safety(compile_strings):
     split_strings = compile_strings.strip().split()
-    return all([_check_safety(cs) for cs in split_strings])
+    return all(_check_safety(cs) for cs in split_strings)
 
 def find_build_dirs(tests):
     """ given the list of test objects, find the set of UNIQUE build
@@ -357,7 +357,7 @@ def test_suite(argv):
     #--------------------------------------------------------------------------
     # check bench dir and create output directories
     #--------------------------------------------------------------------------
-    all_compile = all([t.compileTest == 1 for t in test_list])
+    all_compile = all(t.compileTest == 1 for t in test_list)
 
     if not all_compile:
         bench_dir = suite.get_bench_dir()
@@ -369,12 +369,11 @@ def test_suite(argv):
 
     if suite.slack_post:
         if args.note == "" and suite.repos["source"].pr_wanted is not None:
-            note = "testing PR-{}".format(suite.repos["source"].pr_wanted)
+            note = f"testing PR-{suite.repos['source'].pr_wanted}"
         else:
             note = args.note
 
-        msg = "> {} ({}) test suite started, id: {}\n> {}".format(
-            suite.suiteName, suite.sub_title, suite.test_dir, note)
+        msg = f"> {suite.suiteName} ({suite.sub_title}) test suite started, id: {suite.test_dir}\n> {note}"
         suite.slack_post_it(msg)
 
     if not args.copy_benchmarks is None:
@@ -591,7 +590,7 @@ def test_suite(argv):
                 needed_files.append((test.run_as_script, "copy"))
 
             if test.inputFile:
-                suite.log.log("path to input file: {}".format(test.inputFile))
+                suite.log.log(f"path to input file: {test.inputFile}")
                 needed_files.append((test.inputFile, "copy"))
                 # strip out any sub-directory from the build dir
                 test.inputFile = os.path.basename(test.inputFile)
@@ -741,7 +740,7 @@ def test_suite(argv):
                     if test.customRunCmd is not None:
                         base_cmd = test.customRunCmd
 
-                    base_cmd += " amr.restart={}".format(restart_file)
+                    base_cmd += f" amr.restart={restart_file}"
 
                     if args.with_valgrind:
                         base_cmd = "valgrind " + args.valgrind_options + " " + base_cmd
@@ -780,7 +779,7 @@ def test_suite(argv):
 
             # get the number of levels for reporting
             if not test.run_as_script and "fboxinfo" in suite.tools:
-                prog = "{} -l {}".format(suite.tools["fboxinfo"], output_file)
+                prog = f"{suite.tools['fboxinfo']} -l {output_file}"
                 stdout0, _, rc = test_util.run(prog)
                 test.nlevels = stdout0.rstrip('\n')
                 if not isinstance(params.convert_type(test.nlevels), int):
@@ -825,15 +824,15 @@ def test_suite(argv):
 
                    else:
-                        command = "{} --abort_if_not_all_found -n 0".format(suite.tools["fcompare"])
+                        command = f"{suite.tools['fcompare']} --abort_if_not_all_found -n 0"
 
                         if test.tolerance is not None:
-                            command += " --rel_tol {}".format(test.tolerance)
+                            command += f" --rel_tol {test.tolerance}"
 
                         if test.abs_tolerance is not None:
-                            command += " --abs_tol {}".format(test.abs_tolerance)
+                            command += f" --abs_tol {test.abs_tolerance}"
 
-                        command += " {} {}".format(bench_file, output_file)
{}".format(bench_file, output_file) + command += f" {bench_file} {output_file}" sout, _, ierr = test_util.run(command, outfile=test.comparison_outfile, @@ -859,15 +858,15 @@ def test_suite(argv): if test.compareParticles: for ptype in test.particleTypes.strip().split(): - command = "{}".format(suite.tools["particle_compare"]) + command = f"{suite.tools["particle_compare"]}" if test.particle_tolerance is not None: - command += " --rel_tol {}".format(test.particle_tolerance) + command += f" --rel_tol {test.particle_tolerance}" if test.particle_abs_tolerance is not None: - command += " --abs_tol {}".format(test.particle_abs_tolerance) + command += f" --abs_tol {test.particle_abs_tolerance}" - command += " {} {} {}".format(bench_file, output_file, ptype) + command += f" {bench_file} {output_file} {ptype}" sout, _, ierr = test_util.run(command, outfile=test.comparison_outfile, store_command=True) @@ -892,11 +891,10 @@ def test_suite(argv): suite.log.log("doing the diff...") suite.log.log(f"diff dir: {test.diffDir}") - command = "diff {} -r {} {}".format( - test.diffOpts, diff_dir_bench, test.diffDir) + command = f"diff {test.diffOpts} -r {diff_dir_bench} {test.diffDir}" outfile = test.comparison_outfile - sout, serr, diff_status = test_util.run(command, outfile=outfile, store_command=True) + sout, _, diff_status = test_util.run(command, outfile=outfile, store_command=True) if diff_status == 0: diff_successful = True @@ -1134,24 +1132,23 @@ def test_suite(argv): pass if test.inputFile: - shutil.copy(test.inputFile, "{}/{}.{}".format( - suite.full_web_dir, test.name, test.inputFile)) + shutil.copy(test.inputFile, + f"{suite.full_web_dir}/{test.name}.{test.inputFile}") if test.has_jobinfo: - shutil.copy(job_info_file, "{}/{}.job_info".format( - suite.full_web_dir, test.name)) + shutil.copy(job_info_file, + f"{suite.full_web_dir}/{test.name}") if suite.sourceTree == "C_Src" and test.probinFile != "": - shutil.copy(test.probinFile, "{}/{}.{}".format( - suite.full_web_dir, test.name, test.probinFile)) + shutil.copy(test.probinFile, + f"{suite.full_web_dir}/{test.name}.{test.probinFile}") for af in test.auxFiles: # strip out any sub-directory under build dir for the aux file # when copying shutil.copy(os.path.basename(af), - "{}/{}.{}".format(suite.full_web_dir, - test.name, os.path.basename(af))) + f"{suite.full_web_dir}/{test.name}.{os.path.basename(af)}") if not test.png_file is None: try: diff --git a/suite.py b/suite.py index 892c987..1096acc 100644 --- a/suite.py +++ b/suite.py @@ -8,8 +8,10 @@ import test_util import tempfile as tf -try: from json.decoder import JSONDecodeError -except ImportError: JSONDecodeError = ValueError +try: + from json.decoder import JSONDecodeError +except ImportError: + JSONDecodeError = ValueError DO_TIMINGS_PLOTS = True @@ -30,7 +32,7 @@ import matplotlib.pyplot as plt try: - import matplotlib.dates as dates + from matplotlib import dates except: DO_TIMINGS_PLOTS = False @@ -171,11 +173,10 @@ def get_compare_file(self, output_dir=None): filepath = os.path.join(output_dir, outfile) if not os.path.isfile(filepath) or self.crashed: - self.log.warn("test did not produce any output") return "" - else: return outfile + return outfile plts = [d for d in os.listdir(output_dir) if \ (os.path.isdir(d) and @@ -204,8 +205,10 @@ def measure_performance(self): meets_threshold = ratio < self.performance_threshold percentage = 100 * (1 - ratio) - if percentage < 0: compare_str = "slower" - else: compare_str = "faster" + if percentage < 0: + compare_str = "slower" + else: + 
compare_str = "faster" return meets_threshold, abs(percentage), compare_str @@ -218,7 +221,8 @@ def passed(self): """ Whether the test passed or not """ compile = self.compile_successful - if self.compileTest or not compile: return compile + if self.compileTest or not compile: + return compile compare = not self.doComparison or self.compare_successful analysis = self.analysisRoutine == "" or self.analysis_successful @@ -347,9 +351,11 @@ def set_check_performance(self, value): def get_performance_threshold(self): """ Returns the threshold at which to warn of a performance drop. """ - if Test.performance_params: return float(Test.performance_params[0]) - elif self._check_performance: return self._performance_threshold - else: return None + if Test.performance_params: + return float(Test.performance_params[0]) + if self._check_performance: + return self._performance_threshold + return None def set_performance_threshold(self, value): """ Setter for performance_threshold. """ @@ -359,9 +365,11 @@ def set_performance_threshold(self, value): def get_runs_to_average(self): """ Returns the number of past runs to include in the running runtime average. """ - if Test.performance_params: return int(Test.performance_params[1]) - elif self._check_performance: return self._runs_to_average - else: return None + if Test.performance_params: + return int(Test.performance_params[1]) + if self._check_performance: + return self._runs_to_average + return None def set_runs_to_average(self, value): """ Setter for runs_to_average. """ @@ -638,7 +646,8 @@ def make_test_dirs(self): shutil.rmtree(full_test_dir) else: for i in range(1, maxRuns): - if not os.path.isdir(full_test_dir): break + if not os.path.isdir(full_test_dir): + break test_dir = today + f"-{i:03d}/" full_test_dir = self.testTopDir + self.suiteName + "-tests/" + test_dir @@ -716,7 +725,7 @@ def extract_time(file): # this is of the form:
                     return float(line.split(":")[1].strip().split(" ")[0])
-                elif "(seconds)" in line:
+                if "(seconds)" in line:
                     # this is the older form -- split on "="
                     # form: <p><b>Execution Time</b> (seconds) = 399.414828
                     return float(line.split("=")[1])
@@ -731,9 +740,11 @@ def extract_time(file):
             timings = json.load(open(json_file))
             # Check for proper format
             item = next(iter(timings.values()))
-            if not isinstance(item, dict): raise ValueError
+            if not isinstance(item, dict):
+                raise ValueError
             return timings
-        except (OSError, ValueError, JSONDecodeError, StopIteration): pass
+        except (OSError, ValueError, JSONDecodeError, StopIteration):
+            pass
 
         valid_dirs, all_tests = self.get_run_history(check_activity=False)
@@ -763,11 +774,15 @@ def extract_time(file):
             for test in filter(lambda x: x in passed, all_tests):
 
                 file = f"{dir_path}/{test}.html"
-                try: file = open(file)
-                except: continue
+                try:
+                    file = open(file)
+                except:
+                    continue
 
-                try: time = extract_time(file)
-                except RuntimeError: continue
+                try:
+                    time = extract_time(file)
+                except RuntimeError:
+                    continue
 
                 test_dict = timings.setdefault(test, self.timing_default)
                 test_dict["runtimes"].append(time)
@@ -777,22 +792,21 @@ def extract_time(file):
 
         return timings
 
-    def make_timing_plots(self, active_test_list=None, valid_dirs=None, all_tests=None):
+    def make_timing_plots(self, active_test_list=None, all_tests=None):
         """ plot the wallclock time history for all the valid tests """
 
         if active_test_list is not None:
-            valid_dirs, all_tests = self.get_run_history(active_test_list)
+            _, all_tests = self.get_run_history(active_test_list)
 
         timings = self.get_wallclock_history()
 
-        try: bokeh
+        try:
+            bokeh
         except NameError:
             convf = dates.datestr2num
             using_mpl = True
             self.plot_ext = "png"
         else:
             convf = lambda s: dt.strptime(s, '%Y-%m-%d')
             using_mpl = False
             self.plot_ext = "html"
@@ -800,7 +814,8 @@ def make_timing_plots(self, active_test_list=None, valid_dirs=None, all_tests=None):
 
         def convert_date(date):
             """ Convert to a matplotlib readable date"""
-            if len(date) > 10: date = date[:date.rfind("-")]
+            if len(date) > 10:
+                date = date[:date.rfind("-")]
             return convf(date)
 
         def hover_tool():
@@ -816,13 +831,16 @@ def hover_tool():
 
         # make the plots
         for t in all_tests:
 
-            try: test_dict = timings[t]
-            except KeyError: continue
+            try:
+                test_dict = timings[t]
+            except KeyError:
+                continue
 
             days = list(map(convert_date, test_dict["dates"]))
             times = test_dict["runtimes"]
 
-            if len(times) == 0: continue
+            if len(times) == 0:
+                continue
 
             if using_mpl:
@@ -854,7 +872,8 @@ def hover_tool():
                 source = ColumnDataSource(dict(date=days, runtime=times))
 
                 settings = dict(x_axis_type="datetime")
-                if max(times) / min(times) > 10.0: settings["y_axis_type"] = "log"
+                if max(times) / min(times) > 10.0:
+                    settings["y_axis_type"] = "log"
 
                 plot = figure(**settings)
                 plot.add_tools(hover_tool())
@@ -874,13 +893,13 @@ def get_last_run(self):
 
         # this will work through 2099
         if os.path.isdir(outdir):
-            dirs = [d for d in os.listdir(outdir) if (os.path.isdir(outdir + d) and
-                                                      d.startswith("20"))]
+            dirs = [d for d in os.listdir(outdir)
+                    if os.path.isdir(outdir + d) and d.startswith("20")]
             dirs.sort()
 
             return dirs[-1]
-        else:
-            return None
+
+        return None
 
@@ -895,7 +914,8 @@ def get_test_failures(self, test_dir):
 
         failed = []
         for test in os.listdir("."):
-            if not os.path.isdir(test): continue
+            if not os.path.isdir(test):
+                continue
 
             # the status files are in the web dir
             status_file = f"{self.webTopDir}/{test_dir}/{test}.status"
@@ -956,10 +976,10 @@ def build_c(self, test=None, opts="", target="", outfile=None, c_make_additions=""):
                                            all_opts, self.COMP, c_make_additions, target)
 
         self.log.log(comp_string)
-        stdout, stderr, rc = test_util.run(comp_string, outfile=outfile)
+        _, _, rc = test_util.run(comp_string, outfile=outfile)
 
         # make returns 0 if everything was good
-        if not rc == 0:
+        if rc != 0:
             self.log.warn("build failed")
 
         return comp_string, rc
 
@@ -979,13 +999,15 @@ def run_test(self, test, base_command):
 
         outfile = test.outfile
-        if test.run_as_script: errfile = None
-        else: errfile = test.errfile
+        if test.run_as_script:
+            errfile = None
+        else:
+            errfile = test.errfile
 
         self.log.log(test_run_command)
-        sout, serr, ierr = test_util.run(test_run_command, stdin=True,
-                                         outfile=outfile, errfile=errfile,
-                                         env=test_env)
+        _, _, ierr = test_util.run(test_run_command, stdin=True,
+                                   outfile=outfile, errfile=errfile,
+                                   env=test_env)
         test.run_command = test_run_command
         test.return_code = ierr
@@ -1028,17 +1050,21 @@ def build_tools(self, test_list):
         self.make_realclean(repo="AMReX")
 
         ftools = self.ftools
-        if ("fextract" in self.extra_tools): ftools.append("fextract")
-        if ("fextrema" in self.extra_tools): ftools.append("fextrema")
-        if ("ftime" in self.extra_tools): ftools.append("ftime")
-        if any([t for t in test_list if t.tolerance is not None or t.abs_tolerance is not None]): ftools.append("fvarnames")
+        if "fextract" in self.extra_tools:
+            ftools.append("fextract")
+        if "fextrema" in self.extra_tools:
+            ftools.append("fextrema")
+        if "ftime" in self.extra_tools:
+            ftools.append("ftime")
+        if any(t for t in test_list if t.tolerance is not None or t.abs_tolerance is not None):
+            ftools.append("fvarnames")
 
         for t in ftools:
             self.log.log(f"building {t}...")
-            comp_string, rc = self.build_c(target=f"programs={t}",
-                                           opts="DEBUG=FALSE USE_MPI=FALSE USE_OMP=FALSE ",
-                                           c_make_additions="", outfile=f"{t}.make.out")
-            if not rc == 0:
+            _, rc = self.build_c(target=f"programs={t}",
+                                 opts="DEBUG=FALSE USE_MPI=FALSE USE_OMP=FALSE ",
+                                 c_make_additions="", outfile=f"{t}.make.out")
+            if rc != 0:
                 self.log.fail("unable to continue, tools not able to be built")
 
             exe = test_util.get_recent_filename(self.f_compare_tool_dir, t, ".ex")
@@ -1063,14 +1089,14 @@ def build_tools(self, test_list):
         for t in ctools:
             self.log.log(f"building {t}...")
             comp_string, rc = self.build_c(opts=f"DEBUG=FALSE USE_MPI=FALSE EBASE={t}")
-            if not rc == 0:
+            if rc != 0:
                 self.log.fail("unable to continue, tools not able to be built")
 
             exe = test_util.get_recent_filename(self.c_compare_tool_dir, t, ".exe")
             self.tools[t] = f"{self.c_compare_tool_dir}/{exe}"
 
-        if ("DiffSameDomainRefined" in self.extra_tools):
+        if "DiffSameDomainRefined" in self.extra_tools:
 
             self.extra_tool_dir = "{}/Tools/C_util/Convergence/".format(
                 os.path.normpath(self.amrex_dir))
@@ -1079,18 +1105,24 @@ def build_tools(self, test_list):
             self.make_realclean(repo="AMReX")
 
             extra_tools=[]
-            if ("DiffSameDomainRefined1d" in self.extra_tools): extra_tools.append("DiffSameDomainRefined1d")
-            if ("DiffSameDomainRefined2d" in self.extra_tools): extra_tools.append("DiffSameDomainRefined2d")
-            if ("DiffSameDomainRefined3d" in self.extra_tools): extra_tools.append("DiffSameDomainRefined3d")
+            if "DiffSameDomainRefined1d" in self.extra_tools:
+                extra_tools.append("DiffSameDomainRefined1d")
+            if "DiffSameDomainRefined2d" in self.extra_tools:
+                extra_tools.append("DiffSameDomainRefined2d")
+            if "DiffSameDomainRefined3d" in self.extra_tools:
+                extra_tools.append("DiffSameDomainRefined3d")
 
             for t in extra_tools:
-                if ("1d" in t): ndim=1
-                if ("2d" in t): ndim=2
-                if ("3d" in t): ndim=3
+                if "1d" in t:
+                    ndim = 1
+                if "2d" in t:
+                    ndim = 2
+                if "3d" in t:
+                    ndim = 3
 
                 self.log.log(f"building {t}...")
                 comp_string, rc = self.build_c(opts=
                                                f"EBASE=DiffSameDomainRefined DIM={ndim} DEBUG=FALSE USE_MPI=FALSE USE_OMP=FALSE ")
-                if not rc == 0:
+                if rc != 0:
                     self.log.fail("unable to continue, tools not able to be built")
 
                 exe = test_util.get_recent_filename(self.extra_tool_dir, t, ".ex")
@@ -1154,7 +1186,8 @@ def cmake_config( self, name, path, configOpts="", install = 0, env = None):
         if self.COMP:
             ENV['CXX'] = self.COMP
 
-        if env is not None: ENV.update(env)
+        if env is not None:
+            ENV.update(env)
 
         # remove build and installation directories if present and re-make them
         if os.path.isdir(builddir):
@@ -1180,10 +1213,10 @@ def cmake_config( self, name, path, configOpts="", install = 0, env = None):
             cmd += '-DAMReX_INSTALL=OFF'
 
         self.log.log(cmd)
-        stdout, stderr, rc = test_util.run(cmd, outfile=coutfile, env=ENV)
+        _, _, rc = test_util.run(cmd, outfile=coutfile, env=ENV)
 
         # Check exit condition
-        if not rc == 0:
+        if rc != 0:
             errstr = "\n \nERROR! CMake configuration failed for " + name + " \n"
             errstr += "Check " + coutfile + " for more information."
             self.log.fail(errstr)
@@ -1198,7 +1231,7 @@ def cmake_config( self, name, path, configOpts="", install = 0, env = None):
 
         return builddir, installdir
 
-    def cmake_clean( self, name, path ):
+    def cmake_clean(self, name, path):
         "Clean CMake build and install directories"
 
         self.log.outdent()
@@ -1217,9 +1250,7 @@ def cmake_clean( self, name, path ):
         if os.path.isdir(installdir):
             shutil.rmtree(installdir)
 
-        return
-
-    def cmake_build( self, name, target, path, opts = '', env = None, outfile = None ):
+    def cmake_build(self, name, target, path, opts='', env=None, outfile=None):
         "Build target for a repo configured via CMake"
 
         self.log.outdent()
@@ -1229,7 +1260,8 @@ def cmake_build( self, name, target, path, opts = '', env = None, outfile = None ):
 
         # Set enviroment
         ENV = dict(os.environ) # Copy of current enviroment
-        if env is not None: ENV.update(env)
+        if env is not None:
+            ENV.update(env)
 
         if outfile is not None:
             coutfile = outfile
@@ -1241,10 +1273,10 @@ def cmake_build( self, name, target, path, opts = '', env = None, outfile = None ):
         cmd = f'{self.cmake} --build {self.source_build_dir} -j {self.numMakeJobs} -- {opts} {target}'
 
         self.log.log(cmd)
-        stdout, stderr, rc = test_util.run(cmd, outfile=coutfile, cwd=path, env=ENV )
+        _, _, rc = test_util.run(cmd, outfile=coutfile, cwd=path, env=ENV)
 
         # make returns 0 if everything was good
-        if not rc == 0:
+        if rc != 0:
             errstr = "Failed to build target " + target
             errstr += ". Check " + coutfile + " for more information."
             self.log.fail(errstr)
@@ -1253,8 +1285,6 @@ def cmake_build( self, name, target, path, opts = '', env = None, outfile = None ):
 
         return rc, comp_string
 
-
-
     def build_test_cmake(self, test, opts="", outfile=None):
         """ build an executable with CMake build system """
 
@@ -1264,7 +1294,7 @@ def build_test_cmake(self, test, opts="", outfile=None):
         # add additional CMake config options and re-configure on existing configured
         # build directory, if additional build cmakeSetupOpts are set
         if self.isSuperbuild or test.cmakeSetupOpts != "":
-            builddir, installdir = self.cmake_config(
+            builddir, _ = self.cmake_config(
                 name=test.name,
                 path=self.source_dir,
                 configOpts=self.amrex_cmake_opts + " " +
@@ -1292,7 +1322,7 @@ def build_test_cmake(self, test, opts="", outfile=None):
 
         path_to_exe = None
 
         # search by target name
-        for root, dirnames, filenames in os.walk(self.source_build_dir):
+        for root, _, filenames in os.walk(self.source_build_dir):
             if test.target in filenames:
                 path_to_exe = os.path.join(root, test.target)
                 break
@@ -1314,7 +1344,7 @@ def build_test_cmake(self, test, opts="", outfile=None):
             rc = 1
         else:
             # Find location of executable
-            for root, dirnames, filenames in os.walk(path_to_bin):
+            for root, _, filenames in os.walk(path_to_bin):
                 for f in filenames:
                     f_path = os.path.join(root, f)
                     if os.access(f_path, os.X_OK):
@@ -1340,17 +1370,19 @@ def build_test_cmake(self, test, opts="", outfile=None):
 def f_flag(opt, test_not=False):
     """ convert a test parameter into t if true for the Fortran build system """
     if test_not:
-        if opt: return " "
-        else: return "t"
-    else:
-        if opt: return "t"
-        else: return " "
+        if opt:
+            return " "
+        return "t"
+    if opt:
+        return "t"
+    return " "
 
 def c_flag(opt, test_not=False):
     """ convert a test parameter into t if true for the Fortran build system """
     if test_not:
-        if opt: return "FALSE"
-        else: return "TRUE"
-    else:
-        if opt: return "TRUE"
-        else: return "FALSE"
+        if opt:
+            return "FALSE"
+        return "TRUE"
+    if opt:
+        return "TRUE"
+    return "FALSE"
diff --git a/test_report.py b/test_report.py
index 9c1bdea..7e288f4 100644
--- a/test_report.py
+++ b/test_report.py
@@ -247,7 +247,7 @@ def write_list(self):
             self.of.write("<ul><li>")
 
         # finish nesting
-        for n in range(0, current_indent):
+        for _ in range(0, current_indent):
             self.of.write("</ul>\n")
         self.of.write("</ul>\n")
@@ -303,7 +303,7 @@ def print_row(self, row_list, highlight=False):
 
     def end_table(self):
         self.hf.write("</table>\n")
         if not self.divs is None:
-            for n in range(len(self.divs)):
+            for _ in range(len(self.divs)):
                 self.hf.write("</div>\n")
@@ -595,7 +595,7 @@ def report_single_test(suite, test, tests, failure_msg=None):
         variables_error = False
         no_bench_error = False
         particle_counts_differ_error = False
-
+
         pcomp_line = get_particle_compare_command(diff_lines)
 
         for line in diff_lines:
@@ -797,7 +797,7 @@ def report_this_test_run(suite, make_benchmarks, note, update_time,
                    "<li>branch: {}; hash: {}</li>" + \
                    "<li>changelog: {}</li>"
  • " - for k, r in suite.repos.items(): + for _, r in suite.repos.items(): if r.update: if r.pr_wanted is not None: branch = f"PR #{r.pr_wanted}" @@ -1008,7 +1008,7 @@ def report_coverage(html_file, suite): tvars = (suite.covered_frac, suite.total, suite.covered_nonspecific_frac, suite.total_nonspecific) if not all(tvars): return - + cols = ["coverage type", "coverage %", "# covered", "# uncovered"] ht = HTMLTable(html_file, len(cols), divs=["summary"]) @@ -1048,7 +1048,7 @@ def report_all_runs(suite, active_test_list, max_per_page=50): valid_dirs, all_tests = suite.get_run_history(active_test_list) if suite.do_timings_plots: - suite.make_timing_plots(valid_dirs=valid_dirs, all_tests=all_tests) + suite.make_timing_plots(all_tests=all_tests) # how many pages are we going to spread this over? npages = int(len(valid_dirs)/max_per_page)+1 diff --git a/test_util.py b/test_util.py index c57904f..cbbca99 100644 --- a/test_util.py +++ b/test_util.py @@ -4,7 +4,6 @@ import shlex import subprocess import sys -import email import smtplib USAGE = """