From c8af95429675055c3fca08c0c64e98219916720c Mon Sep 17 00:00:00 2001
From: Philipp Ross
Date: Wed, 17 Jan 2024 11:26:39 +0100
Subject: [PATCH] Add option to keep the same application instance across
 benchmark repetitions

---
 docs/tutorial.rst           | 7 +++++++
 src/BenchmarkManager.py     | 9 ++++++---
 tests/configs/valid/TSP.yml | 3 +++
 3 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/docs/tutorial.rst b/docs/tutorial.rst
index f4597b47..a416c9ad 100644
--- a/docs/tutorial.rst
+++ b/docs/tutorial.rst
@@ -154,6 +154,8 @@ Example for a config file:
 
     application:
      config:
+       regenerate:
+       - False
       nodes:
       - 3
      name: TSP
@@ -169,6 +171,11 @@ Example for a config file:
 One handy thing to do is to use the interactive mode once to create a config file. Then you can change the values of
 this config file and use it to start the framework.
 
+The ``regenerate`` key can be set for every application and defines whether the application instance is regenerated
+for every repetition. If this key is missing, the default value is ``True``. Setting it to ``False`` is handy if you
+use random seeds when creating the application instance and want to keep the same instance for every repetition.
+If you want to change this default behavior for your application, you have to add the ``regenerate`` key to your
+``get_parameter_options`` function.
 
 Run as Container
diff --git a/src/BenchmarkManager.py b/src/BenchmarkManager.py
index a6010cd1..d78f0e62 100644
--- a/src/BenchmarkManager.py
+++ b/src/BenchmarkManager.py
@@ -35,7 +35,6 @@
 comm = get_comm()
 
 
-
 class BenchmarkManager:
     """
     The benchmark manager is the main component of QUARK, orchestrating the overall benchmarking process.
@@ -133,6 +132,9 @@ def run_benchmark(self, benchmark_backlog: list, repetitions: int):
             Path(path).mkdir(parents=True, exist_ok=True)
             with open(f"{path}/application_config.json", 'w') as filehandler:
                 json.dump(backlog_item["config"], filehandler, indent=2)
+
+            problem = None
+            preprocessing_time = None
             for i in range(1, repetitions + 1):
                 logging.info(f"Running backlog item {idx_backlog + 1}/{len(benchmark_backlog)},"
                              f" Iteration {i}/{repetitions}:")
@@ -143,8 +145,9 @@ def run_benchmark(self, benchmark_backlog: list, repetitions: int):
                                               git_revision_number, git_uncommitted_changes, i, repetitions)
 
                 self.application.metrics.set_module_config(backlog_item["config"])
-                problem, preprocessing_time = self.application.preprocess(None, backlog_item["config"],
-                                                                          store_dir=path, rep_count=i)
+                if problem is None or backlog_item["config"].get("regenerate", True):
+                    problem, preprocessing_time = self.application.preprocess(None, backlog_item["config"],
+                                                                              store_dir=path, rep_count=i)
                 self.application.metrics.set_preprocessing_time(preprocessing_time)
                 self.application.save(path, i)
 
diff --git a/tests/configs/valid/TSP.yml b/tests/configs/valid/TSP.yml
index e48f53f2..f3b70855 100644
--- a/tests/configs/valid/TSP.yml
+++ b/tests/configs/valid/TSP.yml
@@ -1,5 +1,8 @@
 application:
   config:
+    regenerate:
+    - True
+    - False
     nodes:
     - 6
   name: TSP
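
Note: the documentation added above says that an application which wants to change the default behavior must expose
the ``regenerate`` key through its ``get_parameter_options`` function. A minimal sketch of how that could look is
given below; it assumes the ``values``/``description`` dictionary schema used by existing QUARK applications, and the
class name ``MyApplication`` and its other parameters are purely illustrative, not part of this patch::

    # Sketch only (not part of this patch): an application exposing "regenerate"
    # via get_parameter_options so it shows up in the interactive config mode and
    # can be swept over in a config file, as in tests/configs/valid/TSP.yml above.
    class MyApplication:  # hypothetical application class, for illustration
        def get_parameter_options(self) -> dict:
            return {
                "nodes": {
                    "values": [3, 6, 10],
                    "description": "How many nodes does your graph need?"
                },
                "regenerate": {
                    # Offering both values lets a benchmark run compare fresh and
                    # reused instances across repetitions.
                    "values": [True, False],
                    "description": "Do you want to regenerate the application instance for every repetition?"
                }
            }

With this in place, a config entry such as ``regenerate: [False]`` keeps one instance for all repetitions, while
omitting the key falls back to the default of regenerating the instance every time.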