MPIEvaluator: Model passing
EwoutH committed Aug 31, 2023
1 parent c72fc6c commit 469ffe6
Showing 1 changed file with 16 additions and 10 deletions.
ema_workbench/em_framework/evaluators.py (16 additions, 10 deletions)
@@ -434,23 +434,29 @@ def finalize(self):

     def evaluate_experiments(self, scenarios, policies, callback, combine="factorial"):
         ex_gen = experiment_generator(scenarios, self._msis, policies, combine=combine)
-        experiments_list = list(ex_gen)  # Convert generator to list
-        model_gen = (self._msis[experiment.model_name] for experiment in experiments_list)
+        experiments = list(ex_gen)  # Convert generator to list
 
-        # Here, we're using the map function from MPIPoolExecutor. This function behaves
-        # like the built-in map, but the tasks are executed in parallel processes.
-        # Depending on how your experiments and callback are structured,
-        # you may need to adjust this to fit your exact needs.
-        results = self._pool.map(run_experiment_mpi, ex_gen, model_gen)
+        # Create the model map just like in SequentialEvaluator
+        models = NamedObjectMap(AbstractModel)
+        models.extend(self._msis)
+
+        # Pack models with each experiment
+        packed = [(experiment, models) for experiment in experiments]
+
+        # Use the pool to execute in parallel
+        results = self._pool.map(run_experiment_mpi, packed)
 
         for experiment, outcomes in results:
             callback(experiment, outcomes)
 
 
-def run_experiment_mpi(experiment, model):
-    model_for_experiment = {experiment.model_name: model}
-    runner = ExperimentRunner(model_for_experiment)
+def run_experiment_mpi(packed_data):
+    experiment, all_models = packed_data
+    model = all_models[experiment.model_name]
 
+    runner = ExperimentRunner({experiment.model_name: model})
     outcomes = runner.run_experiment(experiment)
 
     return experiment, outcomes
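For context: this commit replaces the two parallel iterables previously handed to self._pool.map (the experiment generator and a matching model generator) with a single list of (experiment, models) tuples, where models is the same NamedObjectMap that SequentialEvaluator builds. Each worker then looks up the model it needs by experiment.model_name inside run_experiment_mpi, which keeps the map call to one picklable iterable. Below is a minimal usage sketch, not part of the commit; it assumes the MPIEvaluator follows the same context-manager pattern as the workbench's other evaluators, and the model name, function, parameters, and outcome in it are hypothetical placeholders.

    # Hypothetical usage sketch: the packed (experiment, models) tuples are
    # built inside evaluate_experiments, so callers keep the usual API.
    from ema_workbench import Model, RealParameter, ScalarOutcome
    from ema_workbench.em_framework.evaluators import MPIEvaluator


    def example_function(x=None):
        # placeholder model function
        return {"y": 2 * x}


    if __name__ == "__main__":
        model = Model("example", function=example_function)
        model.uncertainties = [RealParameter("x", 0.0, 10.0)]
        model.outcomes = [ScalarOutcome("y")]

        with MPIEvaluator(model) as evaluator:
            experiments, outcomes = evaluator.perform_experiments(scenarios=100)

Such a script would typically be launched under mpiexec, with the exact invocation depending on the MPI installation; the __main__ guard is the usual precaution so that worker processes importing the module do not re-trigger the experiment setup.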

