diff --git a/.circleci/config.yml b/.circleci/config.yml
index 907346eb38..39f1140133 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -13,7 +13,6 @@ commands:
       echo 'export HDF5_LIB=/usr/lib/x86_64-linux-gnu' >> $BASH_ENV
       echo 'export CHARM_ROOT=$HOME/local/charm-v$CHARM_VER' >> $BASH_ENV
       echo 'export Grackle_ROOT=$HOME/local' >> $BASH_ENV
-      echo 'export GRACKLE_INPUT_DATA_DIR=$HOME/grackle/grackle_data_files/input/' >> $BASH_ENV

       # tag the tip so we can go back to it
       git tag tip
@@ -125,9 +124,24 @@ commands:
       # convert boolean parameter to an env var storing 0 or 1
       SKIP_TEST=$(( 0 <<# parameters.skiptest >> + 1 <</ parameters.skiptest >> ))
       USE_DOUBLE=$(( 0 <<# parameters.usedouble >> + 1 <</ parameters.usedouble >> ))
+      USE_GRACKLE=$(( 0 <<# parameters.usegrackle >> + 1 <</ parameters.usegrackle >> ))
+
+      # this is used for tests involving Grackle
+      if [[ $USE_GRACKLE == 1 ]]; then
+        GRACKLE_INPUT_DATA_DIR="$HOME/grackle/grackle_data_files/input/"
+      else
+        GRACKLE_INPUT_DATA_DIR=""
+      fi

       if [ ! -f << parameters.skipfile >> ]; then
-        cmake -DEnzo-E_CONFIG=linux_gcc -GNinja -DUSE_DOUBLE_PREC=<< parameters.usedouble >> -DUSE_GRACKLE=<< parameters.usegrackle >> -Bbuild -DPARALLEL_LAUNCHER_NPROC_ARG="++local;+p" -DPython3_FIND_VIRTUALENV=ONLY
+        cmake -DEnzo-E_CONFIG=linux_gcc \
+          -GNinja \
+          -DUSE_DOUBLE_PREC=<< parameters.usedouble >> \
+          -DUSE_GRACKLE=<< parameters.usegrackle >> \
+          -DGRACKLE_INPUT_DATA_DIR="$GRACKLE_INPUT_DATA_DIR" \
+          -Bbuild \
+          -DPARALLEL_LAUNCHER_NPROC_ARG="++local;+p" \
+          -DPython3_FIND_VIRTUALENV=ONLY
         cmake --build build -j 4
         source $HOME/venv/bin/activate
         if [[ $SKIP_TEST != 1 ]]; then
diff --git a/input/Grackle/run_endtime_grackle_test.py b/input/Grackle/run_endtime_grackle_test.py
new file mode 100644
index 0000000000..35e08cd9df
--- /dev/null
+++ b/input/Grackle/run_endtime_grackle_test.py
@@ -0,0 +1,144 @@
+import argparse
+import os.path
+import sys
+
+_LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))
+_TOOLS_DIR = os.path.join(_LOCAL_DIR, "../../tools")
+if os.path.isdir(_TOOLS_DIR):
+    sys.path.insert(0, _TOOLS_DIR)
+    from gen_grackle_testing_file import generate_grackle_input_file
+    from run_cpp_test import run_test_and_check_success
+
+else:
+    raise RuntimeError(
+        f"expected testing utilities to be defined in {_TOOLS_DIR}, but that "
+        "directory does not exist"
+    )
+
+parent_parser = argparse.ArgumentParser(add_help=False)
+
+parent_parser.add_argument(
+    "--grackle-data-file", required = True, type = str,
+    help = ("Specifies the path to the grackle data file that is to be used "
+            "in the simulation.")
+)
+parent_parser.add_argument(
+    "--generate-config-path", required = True, type = str,
+    help = ("Specifies the path to the configuration file that is generated "
+            "by this program. The generated file includes the contents of "
+            "--nominal-config-path and overwrites the path to the grackle "
+            "data file based on --grackle-data-file")
+)
+parent_parser.add_argument(
+    '--launch_cmd', required = True, type = str,
+    help = "Specifies the command used to launch the Enzo-E simulation"
+)
+parent_parser.add_argument(
+    "--output-dump", action = "store", default = None,
+    help = ("Specifies the path where a copy of the standard output stream "
+            "from the execution of the program should optionally be dumped "
+            "(the data is still written to the standard output stream). The "
+            "contents of this file may be used to determine the outcome of "
+            "the tests.")
+)
+
+_description = '''\
+Runs a Grackle-related test that succeeds or fails based on the completion
+time of the test.
+The success/failure of the test is reflected by the return
+code of this program (an exit code of 0 indicates the test was entirely
+successful).
+'''
+
+_epilog = '''\
+In more detail, this program expects most of the test problem's parameters to
+be specified by the file at the location given by --nominal-config-path.
+
+The program will generate a new configuration file that uses all of the
+parameters from the --nominal-config-path file but overwrites the parameter
+used to specify the grackle data file with the value specified by
+--grackle-data-file. The generated config file is written to the path given by
+--generate-config-path.
+
+Finally, the program executes Enzo-E with this generated configuration
+file and reports whether the tests have passed.
+'''
+
+parser = argparse.ArgumentParser(description = _description, epilog = _epilog,
+                                 parents = [parent_parser])
+parser.add_argument(
+    "--nominal-config-path", required = True, type = str,
+    help = ("Specifies the path to the configuration file that specifies "
+            "most parameters for the test problem.")
+)
+
+
+def run_grackle_test(launcher, nominal_config_path, generate_config_path,
+                     grackle_data_file, dump_path = None):
+    """
+    Runs an Enzo-E simulation test problem involving Grackle.
+
+    In detail, this function:
+      - expects most of the test problem's parameters to be specified by the
+        file at the location given by `nominal_config_path`.
+      - generates a new configuration file at the path given by
+        `generate_config_path`. This file includes all of the parameters from
+        the `nominal_config_path`, but overwrites the parameter used to
+        specify the grackle data file with the value specified by
+        `grackle_data_file`.
+      - the function then executes Enzo-E with this generated configuration
+        file and reports whether all tests built into the simulation (e.g. an
+        expected completion time) have passed, if there are any.
+
+    Parameters
+    ----------
+    launcher: str
+        Specifies the command used to launch Enzo-E.
+    nominal_config_path: str
+        Specifies the path to the config file that specifies the bulk of the
+        test problem's parameters.
+    generate_config_path: str
+        Specifies the path where the temporary input file should be written.
+    grackle_data_file: str
+        Specifies the path to the grackle data file that is to be used by the
+        test problem.
+    dump_path: str, optional
+        Path to a file where the output of the simulation should be written.
+        If this is None (the default), the output is written to a temporary
+        file.
+
+    Returns
+    -------
+    tests_pass: bool
+        Specifies whether the simulation ran successfully and whether all
+        tests that are built into the simulation have passed (if there are
+        any).
+    """
+
+    print("Generating config file at {} that uses the grackle data file at "
+          "{}".format(generate_config_path, grackle_data_file))
+    generate_grackle_input_file(
+        include_path = nominal_config_path,
+        data_path = grackle_data_file,
+        use_abs_paths = True,
+        output_fname = generate_config_path
+    )
+
+    print("Executing Enzo-E")
+    test_passes = run_test_and_check_success(
+        command = launcher, args_for_command = [generate_config_path],
+        dump_path = dump_path
+    )
+    return test_passes
+
+
+if __name__ == '__main__':
+    args = parser.parse_args()
+
+    test_passes = run_grackle_test(
+        launcher = args.launch_cmd,
+        nominal_config_path = args.nominal_config_path,
+        generate_config_path = args.generate_config_path,
+        grackle_data_file = args.grackle_data_file,
+        dump_path = args.output_dump
+    )
+    if test_passes:
+        sys.exit(0)
+    else:
+        sys.exit(1)
diff --git a/input/Grackle/run_general_grackle_test.py b/input/Grackle/run_general_grackle_test.py
new file mode 100644
index 0000000000..cdf063bd22
--- /dev/null
+++ b/input/Grackle/run_general_grackle_test.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+# runs a generic Grackle test where we compare summary statistics about the
+# fields at the final snapshot
+
+import argparse
+import os.path
+import sys
+
+from run_endtime_grackle_test import run_grackle_test, parent_parser
+
+_LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))
+_TOOLS_DIR = os.path.join(_LOCAL_DIR, "../../tools")
+if os.path.isdir(_TOOLS_DIR):
+    sys.path.insert(0, _TOOLS_DIR)
+    from field_summary import compare_against_reference
+
+else:
+    raise RuntimeError(
+        f"expected testing utilities to be defined in {_TOOLS_DIR}, but that "
+        "directory does not exist"
+    )
+
+
+parser = argparse.ArgumentParser(
+    description = ("Runs a general Grackle test that compares summary "
+                   "statistics at a completion time with some reference "
+                   "values."),
+    parents = [parent_parser]
+)
+
+parser.add_argument(
+    "--prec", required = True, type = str, choices = ["single", "double"],
+    help = "Specifies the precision of Enzo-E."
+)
+
+if __name__ == '__main__':
+    args = parser.parse_args()
+
+    test_passes = run_grackle_test(
+        launcher = args.launch_cmd,
+        nominal_config_path = os.path.join(_LOCAL_DIR,
+                                           'method_grackle_general.in'),
+        generate_config_path = args.generate_config_path,
+        grackle_data_file = args.grackle_data_file,
+        dump_path = args.output_dump
+    )
+    # note that there aren't actually any built-in tests in this test problem.
+    # Thus, if test_passes is False, that means that Enzo-E crashed
+    if not test_passes:
+        raise RuntimeError("Enzo-E crashed")
+
+    # now check field values against the reference values
+    if args.prec == 'double':
+        _ref_tab = os.path.join(_LOCAL_DIR, 'ref_general_grackle-double.csv')
+        atol = 0
+        # these should be flexible for different compiler versions
+        rtol = {"min" : 5e-15, "max" : 5e-6, "mean" : 5e-8,
+                "standard_deviation" : 5e-8}
+    else:
+        _ref_tab = os.path.join(_LOCAL_DIR, 'ref_general_grackle-single.csv')
+        atol = 0
+        # the following may need to be relaxed for different compiler versions
+        rtol = dict((k, 1e-7) for k in ["min", "max", "mean",
+                                        "standard_deviation"])
+
+    test_passes = compare_against_reference(
+        './GeneralGrackle-500.00/GeneralGrackle-500.00.block_list',
+        ref_summary_file_path = _ref_tab,
+        atol = atol, rtol = rtol, report_path = None
+    )
+
+    if test_passes:
+        sys.exit(0)
+    else:
+        sys.exit(1)
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 4169e4e996..e028f1c01a 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -172,6 +172,46 @@ if (USE_YT_BASED_TESTS)
   setup_test_parallel_python(merge_sinks_stationary_parallel merge_sinks/stationary/parallel "input/merge_sinks/run_merge_sinks_test.py" "--prec=${PREC_STRING}" "--ics_type=stationary")
   setup_test_serial_python(merge_sinks_drift_serial merge_sinks/drift/serial "input/merge_sinks/run_merge_sinks_test.py" "--prec=${PREC_STRING}" "--ics_type=drift")
   setup_test_parallel_python(merge_sinks_drift_parallel merge_sinks/drift/parallel "input/merge_sinks/run_merge_sinks_test.py" "--prec=${PREC_STRING}" "--ics_type=drift")
+
+endif()
+
+# Grackle tests
+#
+# Because Grackle is a separate library, the user needs to specify the path
+# to the data directory where the grackle data files are installed. If this
+# is not specified, the Grackle tests are skipped.
+
+if(NOT (DEFINED GRACKLE_INPUT_DATA_DIR))
+  set(GRACKLE_INPUT_DATA_DIR "")
+endif()
+
+if(USE_GRACKLE AND (GRACKLE_INPUT_DATA_DIR STREQUAL ""))
+  message(STATUS
+    " IMPORTANT: No tests involving Grackle have been defined (even though Grackle is being used). To enable these tests, try `-DGRACKLE_INPUT_DATA_DIR=/path/to/grackle/data/dir`."
+  )
+elseif(USE_GRACKLE)
+  # (non-yt-based test)
+  setup_test_parallel_python(grackle_cooling_dt grackle
+    "input/Grackle/run_endtime_grackle_test.py"
+    "--grackle-data-file=${GRACKLE_INPUT_DATA_DIR}/CloudyData_UVB=HM2012_shielded.h5"
+    "--nominal-config-path=input/Grackle/method_grackle_cooling_dt.in"
+    # by using relative paths in the following 2 arguments, we effectively
+    # specify output files in the testing directory
+    "--generate-config-path=./temp_grackle_cooling_dt.in"
+    "--output-dump=./grackle_cooling_dt.log"
+  )
+
+  if (USE_YT_BASED_TESTS)
+    setup_test_parallel_python(grackle_general grackle
+      "input/Grackle/run_general_grackle_test.py"
+      "--prec=${PREC_STRING}"
+      "--grackle-data-file=${GRACKLE_INPUT_DATA_DIR}/CloudyData_UVB=HM2012_shielded.h5"
+      # by using relative paths in the following 2 arguments, we effectively
+      # specify output files in the testing directory
+      "--generate-config-path=./temp_grackle_general.in"
+      "--output-dump=./grackle_general_test.log"
+    )
+  endif()
 endif()
 
 # Convert markdown file to html file for more flexible viewing
diff --git a/test/MethodGrackle/SConscript b/test/MethodGrackle/SConscript
deleted file mode 100644
index 921f54c408..0000000000
--- a/test/MethodGrackle/SConscript
+++ /dev/null
@@ -1,171 +0,0 @@
-Import('env')
-Import('parallel_run')
-Import('serial_run')
-Import('ip_charm')
-
-Import('bin_path')
-Import('test_path')
-
-Import('use_valgrind')
-Import('use_grackle')
-Import('prec')
-
-import os, os.path
-
-#----------------------------------------------------------
-#defines
-#----------------------------------------------------------
-
-env['CPIN'] = 'touch parameters.out; mv parameters.out ${TARGET}.in'
-env['RMIN'] = 'rm -f parameters.out'
-env['clocal_cmd'] = '++local'
-
-date_cmd = 'echo $TARGET > test/STATUS; echo "---------------------"; date +"%Y-%m-%d %H:%M:%S";'
-
-
-grackle_data_dir = os.getenv('GRACKLE_INPUT_DATA_DIR', '')
-
-def _full_path_to_node(node):
-    return os.path.abspath(os.path.join(os.getcwd(), str(node[0])))
-
-if grackle_data_dir != '' and use_grackle:
-    # first, prepare the builder that constructs the modified Grackle input file
-    # (so that the file includes a correct path to the data file)
-    _script_path = os.path.abspath(
-        os.path.join(os.getcwd(), '../../tools/gen_grackle_testing_file.py')
-    )
-    _cmd = (
-        "python {_script_path} $SOURCE "
-        " -d {grackle_data_dir}/CloudyData_UVB=HM2012_shielded.h5 "
-        " --use-abs-paths "
-        " -o $TARGET"
-    ).format(_script_path = _script_path, grackle_data_dir = grackle_data_dir)
-    make_grackle_input = Builder(action = _cmd)
-
-
-    # now prepare the builder that executes tools/field_summary.py (the script
-    # actually performs the test)
-    _script_path = os.path.abspath(
-        os.path.join(os.getcwd(), '../../tools/field_summary.py')
-    )
-    _cmd = (
-        "rm --force $TARGET; "
-        "python {_script_path} cmp $SOURCE "
-        " --ref $REF_TABLE "
-        " --report $TARGET "
-        " --rtol $RTOL --atol $ATOL "
-        " > $LOG_FILE"
-    ).format(_script_path = _script_path)
-
-    compare_field_summary = Builder(
-        action = _cmd + "; awk '{ print \"    \", $0 }' $TARGET"
-    )
-
-    # now add these builders to the environment and clone the environment
-    env.Append(BUILDERS = {'MakeGrackleInput' : make_grackle_input,
-                           'CompareFieldSummary' : compare_field_summary})
-
-#-------------------------------------------------------------
-#load balancing
-#-------------------------------------------------------------
-
-if grackle_data_dir != '' and use_grackle:
-    # method_grackle_cooling_dt.unit
-    # ===========================
-    # (This test uses the cooling time to set the timestep and just checks the
-    # final time after a fixed number of cycles)
-
-    # Part 1 of 2: Construct the input file (with the correct path to the
-    # grackle file)
-    input_grackle_cooling_dt = env.MakeGrackleInput(
-        target = "temp_method_grackle_cooling_dt.in",
-        source = "../../input/Grackle/method_grackle_cooling_dt.in"
-        # source file must be relative to this SConscript file's location
-    )
-    Clean(input_grackle_cooling_dt,
-          _full_path_to_node(input_grackle_cooling_dt))
-
-
-    # Part 2 of 2: Run Enzo-E
-    grackle_cooling_dt = env.RunParallel(
-        "grackle_cooling_dt.unit",
-        bin_path + '/enzo-e',
-        ARGS = _full_path_to_node(input_grackle_cooling_dt),
-        COPY = ''  # no data outputs to copy/move
-    )
-    # there's no data outputs to cleanup
-
-    # the following is required so that the intermediate input file will be
-    # generated when running this test
-    Depends(grackle_cooling_dt, input_grackle_cooling_dt)
-
-
-
-    # method_grackle_general.unit
-    # ===========================
-    # This test compares summary statisitics for a selection of fields to
-    # previously tabulated values.
-
-    # Part 1 of 3: Write an input file with a valid path to a Grackle data file
-    input_grackle_general = env.MakeGrackleInput(
-        target = "temp_method_grackle_general.in",
-        source = "../../input/Grackle/method_grackle_general.in"
-        # source file must be relative to this SConscript file's location
-    )
-    Clean(input_grackle_general, _full_path_to_node(input_grackle_general))
-
-
-    # Part 2 of 3: Run Enzo-E
-    grackle_general_simulation = env.RunParallel(
-        "grackle_general_simulation.log",
-        bin_path + '/enzo-e',
-        ARGS = _full_path_to_node(input_grackle_general),
-        COPY = ('rm -rf ' + test_path + '/MethodGrackle/GeneralGrackle-*; ' +
-                'mv `ls -d GeneralGrackle-*` ' + test_path + '/MethodGrackle/')
-    )
-    _blocklist_fname = 'GeneralGrackle-500.00/GeneralGrackle-500.00.block_list'
-    SideEffect(
-        '#/' + test_path + '/MethodGrackle/' + _blocklist_fname,
-        "grackle_general_simulation.log"
-    )
-    Clean(grackle_cooling_dt,
-          [Glob('#/' + test_path + '/MethodGrackle/GrackleGeneral-*'),
-           _full_path_to_node(grackle_general_simulation)]
-    )
-    Depends(grackle_general_simulation, input_grackle_general)
-
-
-    # Part 3 of 3: Measure the field summary statistics and compare against
-    # a table of reference values
-    _dict_to_json_cmd_arg = lambda d: "'" + str(d).replace('\'', '"') + "'"
-    if prec == 'double':
-        _ref_tab = test_path + '/MethodGrackle/ref_general_grackle-double.csv'
-        atol = 0
-        # these should be flexible for different compiler versions
-        rtol = _dict_to_json_cmd_arg({"min" : 5e-15, "max" : 5e-6,
-                                      "mean" : 5e-8,
-                                      "standard_deviation" : 5e-8})
-    else:
-        _ref_tab = test_path + '/MethodGrackle/ref_general_grackle-single.csv'
-        atol = 0
-        # the following may need to be relaxed for different compiler versions
-        rtol = _dict_to_json_cmd_arg(
-            dict((k, 1e-7) for k in ["min","max","mean","standard_deviation"])
-        )
-
-    _compare_log_path = test_path + '/MethodGrackle/grackle_general_cmp.log'
-    general_grackle = env.CompareFieldSummary(
-        target = 'grackle_general.unit',
-        source = _blocklist_fname,
-        REF_TABLE = _ref_tab,
-        ATOL = atol,
-        RTOL = rtol,
-        LOG_FILE = _compare_log_path
-    )
-    Depends(general_grackle, grackle_general_simulation)
-    Clean(grackle_cooling_dt, _compare_log_path)
-
-    # To update the reference values used in this test, execute the following
-    # commands from the root directory:
-    #   $ ./build.sh test/MethodGrackle/grackle_general_simulation.log
-    #   $ python \
-    #       tools/field_summary.py measure -f pressure,temperature,cooling_time \
-    #       test/MethodGrackle/GeneralGrackle-500.00/GeneralGrackle-500.00.block_list \
-    #       -o test/MethodGrackle/ref_general_grackle-double.csv
diff --git a/tools/field_summary.py b/tools/field_summary.py
index d9efdf0e87..fba8cf076d 100644
--- a/tools/field_summary.py
+++ b/tools/field_summary.py
@@ -269,20 +269,39 @@ def represent_as_string(self, colnames):
         else:
             return str(dict((col,self[col]) for col in colnames))
 
-def _process_tol_args(arg_namespace):
-    # Process the tolerance arguments. They can be a string encoding an
-    #    - int or float (this applies to all columns)
-    #    - JSON object that assoicates tolerances with columns of the summary
-    #      table (columns without entries have a tolerance of 0)
-
-    def _process(tol_name):
-        try:
-            tmp = json.loads(getattr(arg_namespace, tol_name))
-        except json.JSONDecodeError:
-            tmp = None
+    @classmethod
+    def factory(cls, tol_val, tol_name, parse_from_str = False):
+        """
+        Constructs a ToleranceConfig instance based on `tol_val`.
+
+        Parameters
+        ----------
+        tol_val
+            This can be an int or float, which describes the tolerance for
+            all columns, or a dict that associates tolerances with columns of
+            the summary table (columns without entries have a tolerance of
+            0). When `parse_from_str` is False, an existing ToleranceConfig
+            instance is also accepted and returned unchanged.
+        tol_name: str
+            Specifies the type of tolerance that is being processed. This is
+            only used for more descriptive error messages.
+        parse_from_str: bool
+            When True, tol_val is expected to be a string that directly
+            encodes the int or float value or a string that encodes a JSON
+            object that corresponds to the dict format described above.
+        """
+
+        if parse_from_str:
+            try:
+                tmp = json.loads(tol_val)
+            except json.JSONDecodeError:
+                tmp = None
+        elif isinstance(tol_val, cls):
+            return tol_val
+        else:
+            tmp = tol_val
         if isinstance(tmp, (float, int)):
-            return ToleranceConfig(fallback_tol = tmp, col_specific_vals = {})
+            return cls(fallback_tol = tmp, col_specific_vals = {})
         elif isinstance(tmp, dict):
             for col, val in tmp.items(): # check contents of dict
@@ -292,38 +311,55 @@ def _process(tol_name):
             elif not isinstance(val, (int,float)):
                 raise ValueError(f"{tol_name} for '{col}', {val}, isn't "
                                  "an int or float")
-            return ToleranceConfig(fallback_tol = 0., col_specific_vals = tmp)
+            return cls(fallback_tol = 0., col_specific_vals = tmp)
         else:
             raise ValueError(
-                f"{tol_name} option expects an int/float or a JSON object "
-                "that pairs summary table column-names with int/floats. \n"
-                f"Received: '{getattr(arg_namespace, tol_name)}'"
+                f"{tol_name} option expects an int/float or a dictionary "
+                "object that pairs summary table column-names with int/floats. "
+                f"\n Received: {tol_val!r}"
             )
-    return _process('atol'), _process('rtol')
-
-def _main_cmp(args):
-    # Program to use by the cmp subcommand. Just construct the field summary
-    # table and compare against a reference
+def compare_against_reference(snap_path, ref_summary_file_path,
+                              atol, rtol, report_path = None):
+    """
+    Computes summary statistics for fields from the snapshot at the path
+    specified by snap_path and compares them against reference values.
+
+    Parameters
+    ----------
+    snap_path: str
+        Path to the snapshot that is being tested.
+    ref_summary_file_path: str
+        Path to the csv file holding the reference summary statistics.
+    atol, rtol
+        Specifies the absolute and relative tolerances, respectively, for the
+        comparison.
+        These arguments are allowed to be ints or floats (which apply for
+        all summary statistics), dicts that provide tolerances for
+        particular summary statistics, or instances of ToleranceConfig.
+    report_path: str, optional
+        When specified, this gives a path where an output report is written
+        describing the outcome of the comparison.
+    """
+    atol = ToleranceConfig.factory(atol, 'atol', parse_from_str = False)
+    rtol = ToleranceConfig.factory(rtol, 'rtol', parse_from_str = False)
+
     print("Loading the reference table to identify the fields that are to be "
           "summarized")
-    ref_field_table = read_field_summary(args.ref)
-
-    atol,rtol = _process_tol_args(args)
+    ref_field_table = read_field_summary(ref_summary_file_path)
     field_names = ref_field_table['name'].tolist()
 
     print("Measuring the Field Summary Properties")
     cur_field_table, sim_props = measure_field_summary(
-        args.target_path, field_names = field_names
+        snap_path, field_names = field_names
     )
 
-    if args.report is None:
+    if report_path is None:
         report_creator = create_dummy_report
     else:
         report_creator = create_test_report
-    with report_creator(args.report, clobber = True) as tr:
+    with report_creator(report_path, clobber = True) as tr:
         print("Comparing field summary tables")
         test_rslt = test_equivalent_field_tables(cur_field_table,
                                                  ref_field_table,
@@ -335,6 +371,18 @@
             print("Field summary tables are inconsistent")
     return test_rslt
 
+def _main_cmp(args):
+    # Program used by the cmp subcommand. Just construct the field summary
+    # table and compare against a reference
+
+    atol = ToleranceConfig.factory(args.atol, 'atol', parse_from_str = True)
+    rtol = ToleranceConfig.factory(args.rtol, 'rtol', parse_from_str = True)
+
+    return compare_against_reference(snap_path = args.target_path,
+                                     ref_summary_file_path = args.ref,
+                                     atol = atol, rtol = rtol,
+                                     report_path = args.report)
+
 # define command line interface!
 parser = argparse.ArgumentParser(
diff --git a/tools/run_cpp_test.py b/tools/run_cpp_test.py
index e2b895117a..8fa14e37fd 100644
--- a/tools/run_cpp_test.py
+++ b/tools/run_cpp_test.py
@@ -26,8 +26,8 @@
     help = "the C++ binary that is to be executed"
 )
 parser.add_argument(
-    "args_for_command", metavar = "ARGS", action = "store", nargs = argparse.REMAINDER,
-    default = [],
+    "args_for_command", metavar = "ARGS", action = "store",
+    nargs = argparse.REMAINDER, default = [],
     help = "the arguments to the C++ binary that are to be executed"
 )
 
@@ -95,26 +95,33 @@ def _num_occurences(pattern, skip_binary_file_search = False):
     else:
         return False
 
-if __name__ == '__main__':
-    args = parser.parse_args()
-
-    if args.output_dump is None:
+def run_test_and_check_success(command, args_for_command = [],
+                               dump_path = None):
+    if dump_path is None:
+        delete_output = True
         dump_path = tempfile.mktemp() # path for a temporary file
     else:
-        dump_path = args.output_dump
+        delete_output = False
 
-    success = execute_command(command = args.command,
-                              args = args.args_for_command,
-                              output_dump = dump_path)
-    if not success:
-        out = 1
-    elif log_suggests_test_failure(dump_path):
-        out = 1
-    else:
-        out = 0
+    command_success = execute_command(command = command,
+                                      args = args_for_command,
+                                      output_dump = dump_path)
+    success = command_success and not log_suggests_test_failure(dump_path)
 
     # cleanup the temporary file
-    if args.output_dump is None:
+    if delete_output:
         os.remove(dump_path)
+    return success
 
-    sys.exit(out)
+if __name__ == '__main__':
+    args = parser.parse_args()
+
+    test_passes = run_test_and_check_success(
+        command = args.command, args_for_command = args.args_for_command,
+        dump_path = args.output_dump
+    )
+
+    if test_passes:
+        sys.exit(0)
+    else:
+        sys.exit(1)
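Note: outside of the CTest registrations above, the new end-time driver can also be run by hand from the repository root. A minimal sketch of such an invocation follows; the charmrun launcher string, process count, binary location, and data-file location are assumptions that depend on the local Charm++ build and Grackle checkout, while the flags themselves are the ones defined by input/Grackle/run_endtime_grackle_test.py and used in test/CMakeLists.txt:

    python input/Grackle/run_endtime_grackle_test.py \
        --launch_cmd "charmrun ++local +p4 build/bin/enzo-e" \
        --grackle-data-file "$HOME/grackle/grackle_data_files/input/CloudyData_UVB=HM2012_shielded.h5" \
        --nominal-config-path input/Grackle/method_grackle_cooling_dt.in \
        --generate-config-path ./temp_grackle_cooling_dt.in \
        --output-dump ./grackle_cooling_dt.log

The script exits with status 0 when the simulation completes and the dumped log shows no test failures (the check performed by run_test_and_check_success), and with status 1 otherwise.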