diff --git a/regtest.py b/regtest.py
index c592dd1..954ceab 100755
--- a/regtest.py
+++ b/regtest.py
@@ -690,7 +690,7 @@ def test_suite(argv):

         # if it is a restart test, then rename the final output file and
         # restart the test
-        if test.return_code == 0 and test.restartTest:
+        if (test.ignore_return_code == 1 or test.return_code == 0) and test.restartTest:
             skip_restart = False

             last_file = test.get_compare_file(output_dir=output_dir)
@@ -752,14 +752,14 @@ def test_suite(argv):
             suite.log.log(f"Execution time: {test.wall_time:.3f} s")

             # Check for performance drop
-            if test.return_code == 0 and test.check_performance:
+            if (test.ignore_return_code == 1 or test.return_code == 0) and test.check_performance:
                 test_performance(test, suite, runtimes)

         #----------------------------------------------------------------------
         # do the comparison
         #----------------------------------------------------------------------
         output_file = ""
-        if test.return_code == 0 and not test.selfTest:
+        if (test.ignore_return_code == 1 or test.return_code == 0) and not test.selfTest:

             if test.outputFile == "":
                 if test.compareFile == "":
@@ -969,7 +969,7 @@ def test_suite(argv):
             else:   # don't do a pltfile comparison
                 test.compare_successful = True

-        elif test.return_code == 0:  # selfTest
+        elif (test.ignore_return_code == 1 or test.return_code == 0):  # selfTest

             if args.make_benchmarks is None:

@@ -1002,7 +1002,7 @@ def test_suite(argv):
         #----------------------------------------------------------------------
         # do any requested visualization (2- and 3-d only) and analysis
         #----------------------------------------------------------------------
-        if test.return_code == 0 and not test.selfTest:
+        if (test.ignore_return_code == 1 or test.return_code == 0) and not test.selfTest:

             if output_file != "":
                 if args.make_benchmarks is None:
@@ -1107,7 +1107,7 @@ def test_suite(argv):

         # if the test ran and passed, add its runtime to the dictionary
         #----------------------------------------------------------------------
-        if test.return_code == 0 and test.record_runtime(suite):
+        if (test.ignore_return_code == 1 or test.return_code == 0) and test.record_runtime(suite):
             test_dict = runtimes.setdefault(test.name, suite.timing_default)
             test_dict["runtimes"].insert(0, test.wall_time)
             test_dict["dates"].insert(0, suite.test_dir.rstrip("/"))
@@ -1168,7 +1168,7 @@ def test_suite(argv):
                 # analysis was not successful. Reset the output image
                 test.analysisOutputImage = ""

-        elif test.return_code == 0:
+        elif test.ignore_return_code == 1 or test.return_code == 0:

             if test.doComparison:
                 shutil.copy(f"{test.name}.status", suite.full_web_dir)
diff --git a/suite.py b/suite.py
index 2927f68..5a5bd81 100644
--- a/suite.py
+++ b/suite.py
@@ -61,6 +61,7 @@ def __init__(self, name):
         self.run_as_script = ""
         self.script_args = ""
         self.return_code = None
+        self.ignore_return_code = 0

         self.restartTest = 0
         self.restartFileNum = -1
@@ -990,7 +991,7 @@ def run_test(self, test, base_command):
         test.return_code = ierr

         # Print compilation error message (useful for CI tests)
-        if test.return_code != 0 and self.verbose > 0:
+        if (test.ignore_return_code == 0 and test.return_code != 0) and self.verbose > 0:
             self.log.warn("Test stdout:")
             with open(f"{outfile}") as f:
                 print(f.read())
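
The patch repeats the same gating expression at every check point in test_suite(): a test's run is treated as successful either when it exited with code 0 or when the new ignore_return_code attribute is set to 1. A minimal sketch of that intent, using a hypothetical helper (execution_ok is not defined in regtest.py or suite.py and is not part of this patch), is:

    def execution_ok(test):
        # A run counts as successful if it exited cleanly, or if the test is
        # configured to ignore its return code (test.ignore_return_code == 1,
        # defaulting to 0 as set in Test.__init__ above).
        return test.ignore_return_code == 1 or test.return_code == 0

    # Each gate in test_suite() would then read, for example:
    #     if execution_ok(test) and test.restartTest: ...
    #     if execution_ok(test) and not test.selfTest: ...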