Select: support user-provided calibration results #1338

Open · wants to merge 9 commits into base: develop
71 changes: 54 additions & 17 deletions pypesto/select/method.py
@@ -8,6 +8,10 @@
import numpy as np
import petab_select
from petab_select import (
CANDIDATE_SPACE,
MODELS,
PREDECESSOR_MODEL,
UNCALIBRATED_MODELS,
VIRTUAL_INITIAL_MODEL,
CandidateSpace,
Criterion,
@@ -213,6 +217,11 @@ class MethodCaller:
Specify the predecessor (initial) model for the model selection
algorithm. If ``None``, then the algorithm will generate an initial
predecessor model if required.
user_calibrated_models:
User-supplied calibration results, as a list of models.
If a model with the same hash is encountered in the current model
selection run, and the user-supplied calibrated model has its
`criterion` value set, the model will not be calibrated again.
select_first_improvement:
If ``True``, model selection will terminate as soon as a better model
is found. If ``False``, all candidate models will be tested.
@@ -245,6 +254,7 @@ def __init__(
# TODO deprecated
model_to_pypesto_problem_method: Callable[[Any], Problem] = None,
model_problem_options: dict = None,
user_calibrated_models: list[Model] = None,
):
"""Arguments are used in every `__call__`, unless overridden."""
self.petab_select_problem = petab_select_problem
@@ -256,6 +266,12 @@ def __init__(
self.select_first_improvement = select_first_improvement
self.startpoint_latest_mle = startpoint_latest_mle

self.user_calibrated_models = {}
if user_calibrated_models is not None:
self.user_calibrated_models = {
model.get_hash(): model for model in user_calibrated_models
}

self.logger = MethodLogger()

# TODO deprecated
@@ -366,39 +382,60 @@ def __call__(
# All calibrated models in this iteration (see second return value).
self.logger.new_selection()

candidate_space = petab_select.ui.candidates(
iteration = petab_select.ui.start_iteration(
problem=self.petab_select_problem,
candidate_space=self.candidate_space,
limit=self.limit,
calibrated_models=self.calibrated_models,
newly_calibrated_models=newly_calibrated_models,
excluded_model_hashes=self.calibrated_models.keys(),
# FIXME confirm old results are reproducible after this change
# calibrated_models=self.calibrated_models,
# newly_calibrated_models=newly_calibrated_models,
# excluded_model_hashes=self.calibrated_models.keys(),
criterion=self.criterion,
user_calibrated_models=self.user_calibrated_models,
)
predecessor_model = self.candidate_space.predecessor_model

if not candidate_space.models:
if not iteration[UNCALIBRATED_MODELS]:
raise StopIteration("No valid models found.")

# TODO parallelize calibration (maybe not sensible if
# `self.select_first_improvement`)
newly_calibrated_models = {}
for candidate_model in candidate_space.models:
# autoruns calibration
self.new_model_problem(model=candidate_model)
newly_calibrated_models[
candidate_model.get_hash()
] = candidate_model
calibrated_models = {}
for model in iteration[UNCALIBRATED_MODELS]:
if (
model.get_criterion(
criterion=self.criterion,
compute=True,
raise_on_failure=False,
)
is not None
):
self.logger.log(
message=(
"Unexpected calibration result already available for "
f"model: `{model.get_hash()}`. Skipping "
"calibration."
),
level="warning",
)
else:
self.new_model_problem(model=model)

calibrated_models[model.get_hash()] = model
method_signal = self.handle_calibrated_model(
model=candidate_model,
predecessor_model=predecessor_model,
model=model,
predecessor_model=iteration[PREDECESSOR_MODEL],
)
if method_signal.proceed == MethodSignalProceed.STOP:
break

self.calibrated_models.update(newly_calibrated_models)
iteration_results = petab_select.ui.end_iteration(
candidate_space=iteration[CANDIDATE_SPACE],
calibrated_models=calibrated_models,
)

self.calibrated_models.update(iteration_results[MODELS])

return predecessor_model, newly_calibrated_models
return iteration[PREDECESSOR_MODEL], iteration_results[MODELS]

def handle_calibrated_model(
self,
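For reviewers, a usage sketch of the new `user_calibrated_models` option (not part of this diff). It assumes that the higher-level `pypesto.select.Problem` wrapper forwards its keyword arguments to `MethodCaller`, as it does for the other options in this signature; the problem YAML path, the choice of `Method.FORWARD` and `Criterion.AIC`, and the source of the previously calibrated models are placeholders.

```python
import petab_select
from petab_select import Criterion, Method

import pypesto.select

# Model selection problem (path is a placeholder).
petab_select_problem = petab_select.Problem.from_yaml(
    "petab_select_problem.yaml"
)

# Models calibrated in an earlier run, e.g. the calibrated models returned
# by a previous selection. Their criterion values must already be set for
# recalibration to be skipped (placeholder list).
earlier_models: list[petab_select.Model] = [
    # ...
]

pypesto_select_problem = pypesto.select.Problem(
    petab_select_problem=petab_select_problem
)

# Keyword arguments are assumed to be forwarded to `MethodCaller`; any
# candidate whose hash matches a supplied model with a criterion value
# set is reused instead of being calibrated again.
best_models = pypesto_select_problem.select_to_completion(
    method=Method.FORWARD,
    criterion=Criterion.AIC,
    user_calibrated_models=earlier_models,
)
```

Per the docstring above, a supplied model is only reused when its `criterion` value is already set; internally, each iteration now runs through `petab_select.ui.start_iteration` / `end_iteration` instead of `petab_select.ui.candidates`.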
6 changes: 4 additions & 2 deletions setup.cfg
@@ -166,10 +166,12 @@ example =
notebook >= 6.1.4
benchmark_models_petab @ git+https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git@master#subdirectory=src/python
select =
# Remove when vis is moved to PEtab Select version
# Remove when vis is moved to PEtab Select
networkx >= 2.5.1
# End remove
petab-select >= 0.1.12
#petab-select >= 0.1.12
# FIXME before merge
petab-select @ git+https://github.com/PEtab-dev/petab_select.git@user_calibrated_models
test =
pytest >= 5.4.3
pytest-cov >= 2.10.0