diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index fac01ab4f..25420efc4 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -3,10 +3,13 @@
# default owners
* @ICB-DCM/pypesto-maintainers
+/doc/example/censored.ipynb @Doresic
/doc/example/hdf5_storage.ipynb @PaulJonasJost
/doc/example/hierarchical.ipynb @dilpath @dweindl
/doc/example/julia.ipynb @PaulJonasJost
/doc/example/model_selection.ipynb @dilpath
+/doc/example/nonlinear_monotone.ipynb @Doresic
+/doc/example/ordinal.ipynb @Doresic
/doc/example/petab_import.ipynb @dweindl @FFroehlich
/doc/example/sampler_study.ipynb @dilpath
/doc/example/sampling_diagnostics.ipynb @dilpath
@@ -16,8 +19,9 @@
/pypesto/engine/ @PaulJonasJost
/pypesto/engine/mpi_pool.py @PaulJonasJost
/pypesto/ensemble/ @dilpath @PaulJonasJost
-/pypesto/hierarchical/ @dweindl @doresic
-/pypesto/hierarchical/optimal_scaling_approach/ @doresic
+/pypesto/hierarchical/ @dweindl @Doresic
+/pypesto/hierarchical/optimal_scaling_approach/ @Doresic
+/pypesto/hierarchical/spline_approximation/ @Doresic
/pypesto/history/ @PaulJonasJost
/pypesto/objective/ @PaulJonasJost
/pypesto/objective/amici/ @dweindl @FFroehlich
@@ -33,7 +37,7 @@
/pypesto/select/ @dilpath
/pypesto/startpoint/ @PaulJonasJost
/pypesto/store/ @PaulJonasJost
-/pypesto/visualize/@stephanmg @m-philipps
+/pypesto/visualize/ @stephanmg @m-philipps
/test/base/ @PaulJonasJost @m-philipps
/test/doc/ @PaulJonasJost
/test/hierarchical/ @dweindl @doresic
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ae08b6db6..3a70f5765 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -171,6 +171,11 @@ jobs:
matrix:
python-version: ['3.9', '3.11']
+ # needed to allow julia-actions/cache to delete old caches that it has created
+ permissions:
+ actions: write
+ contents: read
+
steps:
- name: Check out repository
uses: actions/checkout@v3
@@ -180,25 +185,27 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- - name: Install julia
- uses: julia-actions/setup-julia@v1
- with:
- version: 1.9
-
- - name: Cache
+      - name: Cache tox and ~/.cache
uses: actions/cache@v3
with:
path: |
~/.cache
.tox/
- ~/.julia/artifacts
key: ${{ runner.os }}-${{ matrix.python-version }}-ci-${{ github.job }}
+ - name: Install julia
+ uses: julia-actions/setup-julia@v1
+ with:
+ version: 1.9
+
+ - name: Cache Julia
+ uses: julia-actions/cache@v1
+
- name: Install dependencies
run: .github/workflows/install_deps.sh
- name: Install PEtabJL dependencies
- run: julia -e 'using Pkg; Pkg.add("PEtab"); Pkg.add("OrdinaryDiffEq"), Pkg.add("Sundials")'
+ run: julia -e 'using Pkg; Pkg.add("PEtab"); Pkg.add("OrdinaryDiffEq"); Pkg.add("Sundials")'
- name: Run tests
timeout-minutes: 25
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index f954fa7b8..be0db31d3 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -6,6 +6,28 @@ Release notes
..........
+0.4.1 (2023-12-05)
+-------------------
+
+* General:
+    * Documentation (#1214, #1227, #1223, #1230, #1229)
+    * Update code to avoid deprecations and warnings (#1217, #1219)
+    * Updated code ownership (#1232, #1233)
+    * Updated citation (#1221)
+    * Improved testing (#1218, #1216, #1231)
+* History:
+    * Enable converting MemoryHistory to Hdf5History (#1211)
+* Profile:
+    * Code simplification and other clean-up (#1225)
+    * Fix incorrect indexing in `pypesto.profile.profile_next_guess.get_reg_polynomial` (#1226)
+* Optimize:
+    * Warnings for scipy optimizers together with Laplace priors (#1228)
+* Visualization:
+    * Skip the history trace if the trace is empty, which occurs for infinite initial values (#1234)
+* Ensemble:
+    * Fix `Ensemble.from_optimization_endpoints` (#1237)
+
+
0.4.0 (2023-11-22)
-------------------
diff --git a/README.md b/README.md
index e6f00ba30..036437888 100644
--- a/README.md
+++ b/README.md
@@ -28,9 +28,9 @@ pyPESTO features include:
* Parameter estimation with ordinal data as described in
[Schmiester et al. (2020)](https://doi.org/10.1007/s00285-020-01522-w) and
[Schmiester et al. (2021)](https://doi.org/10.1093/bioinformatics/btab512).
- ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/example_ordinal.ipynb))
-* Parameter estimation with censored data. ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/example_censored.ipynb))
-* Parameter estimation with nonlinear-monotone data. ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/example_nonlinear_monotone.ipynb))
+ ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/ordinal.ipynb))
+* Parameter estimation with censored data. ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/censored.ipynb))
+* Parameter estimation with nonlinear-monotone data. ([example](https://github.com/ICB-DCM/pyPESTO/blob/master/doc/example/nonlinear_monotone.ipynb))
## Quick install
@@ -59,20 +59,17 @@ We are happy about any contributions. For more information on how to contribute
to pyPESTO check out
-## Publications
+## How to Cite
**Citeable DOI for the latest pyPESTO release:**
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.2553546.svg)](https://doi.org/10.5281/zenodo.2553546)
-There is a list of [publications using pyPESTO](https://pypesto.readthedocs.io/en/latest/references.html).
-If you used pyPESTO in your work, we are happy to include
-your project, please let us know via a GitHub issue.
-
When using pyPESTO in your project, please cite
* Schälte, Y., Fröhlich, F., Jost, P. J., Vanhoefer, J., Pathirana, D., Stapor, P.,
Lakrisenko, P., Wang, D., Raimúndez, E., Merkt, S., Schmiester, L., Städter, P.,
Grein, S., Dudkin, E., Doresic, D., Weindl, D., & Hasenauer, J. (2023). pyPESTO: A
- modular and scalable tool for parameter estimation for dynamic models [(arXiv:2305.01821)](https://doi.org/10.48550/arXiv.2305.01821).
+ modular and scalable tool for parameter estimation for dynamic models,
+ Bioinformatics, 2023, btad711, [doi:10.1093/bioinformatics/btad711](https://doi.org/10.1093/bioinformatics/btad711)
When presenting work that employs pyPESTO, feel free to use one of the icons in
[doc/logo/](https://github.com/ICB-DCM/pyPESTO/tree/main/doc/logo):
@@ -81,6 +78,10 @@ When presenting work that employs pyPESTO, feel free to use one of the icons in
+There is a list of [publications using pyPESTO](https://pypesto.readthedocs.io/en/latest/references.html).
+If you used pyPESTO in your work, we are happy to include
+your project; please let us know via a GitHub issue.
+
## References
pyPESTO supersedes [**PESTO**](https://github.com/ICB-DCM/PESTO/) a parameter estimation
diff --git a/doc/conf.py b/doc/conf.py
index 56b0d041e..0cca2da1d 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -71,6 +71,10 @@
'autodoc_inherit_docstrings': True,
}
autodoc_mock_imports = ["amici"]
+autodoc_class_signature = "separated"
+
+# napoleon options
+napoleon_use_rtype = False
# links for intersphinx
intersphinx_mapping = {
@@ -90,7 +94,7 @@
typehints_document_rtype = True
autodoc_typehints = "description"
-bibtex_bibfiles = ["using_pypesto.bib"]
+bibtex_bibfiles = ["using_pypesto.bib", "references.bib"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
diff --git a/doc/example.rst b/doc/example.rst
index f48c1d0cb..eb58c6c63 100644
--- a/doc/example.rst
+++ b/doc/example.rst
@@ -52,9 +52,9 @@ Algorithms and features
example/model_selection.ipynb
example/julia.ipynb
example/hierarchical.ipynb
- example/example_ordinal.ipynb
- example/example_censored.ipynb
- example/example_nonlinear_monotone.ipynb
+ example/ordinal.ipynb
+ example/censored.ipynb
+ example/nonlinear_monotone.ipynb
Application examples
--------------------
diff --git a/doc/example/example_censored.ipynb b/doc/example/censored.ipynb
similarity index 99%
rename from doc/example/example_censored.ipynb
rename to doc/example/censored.ipynb
index 771d4831f..28ef1540a 100644
--- a/doc/example/example_censored.ipynb
+++ b/doc/example/censored.ipynb
@@ -495,7 +495,7 @@
"For censored measurements, the `measurement` column will be ignored. For the `Ybar` observable we didn't specify a measurement type, so those will be used as quantitative.\n",
"\n",
"#### Note on inclusion of additional data types:\n",
- "It is possible to include observables with different types of data to the same `petab_problem`. Refer to the notebooks on using [nonlinear-monotone data](example_nonlinear_monotone.ipynb) and [ordinal data](example_ordinal.ipynb) for details on integration of other data types. Additionally, as shown in this example, if the `measurementType` column is left empty for all measurements of an observable, the observable will be treated as quantitative."
+ "It is possible to include observables with different types of data to the same `petab_problem`. Refer to the notebooks on using [nonlinear-monotone data](nonlinear_monotone.ipynb) and [ordinal data](ordinal.ipynb) for details on integration of other data types. Additionally, as shown in this example, if the `measurementType` column is left empty for all measurements of an observable, the observable will be treated as quantitative."
]
},
{
diff --git a/doc/example/example_nonlinear_monotone.ipynb b/doc/example/nonlinear_monotone.ipynb
similarity index 99%
rename from doc/example/example_nonlinear_monotone.ipynb
rename to doc/example/nonlinear_monotone.ipynb
index 9c5881d5c..a1b558efb 100644
--- a/doc/example/example_nonlinear_monotone.ipynb
+++ b/doc/example/nonlinear_monotone.ipynb
@@ -606,7 +606,7 @@
"metadata": {},
"source": [
"#### Note on inclusion of additional data types:\n",
- "It is possible to include observables with different types of data to the same `petab_problem`. Refer to the notebooks on using [ordinal data](example_ordinal.ipynb) and [censored data](example_censored.ipynb) for details on integration of other data types. If the `measurementType` column is left empty for all measurements of an observable, the observable will be treated as quantitative."
+ "It is possible to include observables with different types of data to the same `petab_problem`. Refer to the notebooks on using [ordinal data](ordinal.ipynb) and [censored data](censored.ipynb) for details on integration of other data types. If the `measurementType` column is left empty for all measurements of an observable, the observable will be treated as quantitative."
]
},
{
diff --git a/doc/example/example_ordinal.ipynb b/doc/example/ordinal.ipynb
similarity index 99%
rename from doc/example/example_ordinal.ipynb
rename to doc/example/ordinal.ipynb
index 91c8c3cc9..8cd3736b9 100644
--- a/doc/example/example_ordinal.ipynb
+++ b/doc/example/ordinal.ipynb
@@ -481,7 +481,7 @@
"Measurements with a larger category number will be constrained to be higher in the ordering. Multiple measurements can be assigned to the same category. If this is done, these measurements will be treated as indistinguishable. \n",
"\n",
"#### Note on inclusion of additional data types:\n",
- "It is possible to include observables with different types of data to the same `petab_problem`. Refer to the notebooks on using [nonlinear-monotone data](example_nonlinear_monotone.ipynb) and [censored data](example_censored.ipynb) for details on integration of other data types. If the `measurementType` column is left empty for all measurements of an observable, the observable will be treated as quantitative."
+ "It is possible to include observables with different types of data to the same `petab_problem`. Refer to the notebooks on using [nonlinear-monotone data](nonlinear_monotone.ipynb) and [censored data](censored.ipynb) for details on integration of other data types. If the `measurementType` column is left empty for all measurements of an observable, the observable will be treated as quantitative."
]
},
{
diff --git a/doc/how_to_cite.rst b/doc/how_to_cite.rst
index 53054abd8..0685f3643 100644
--- a/doc/how_to_cite.rst
+++ b/doc/how_to_cite.rst
@@ -7,17 +7,13 @@ How to cite pyPESTO
:target: https://doi.org/10.5281/zenodo.2553546
:alt: pyPESTO release DOI
-
-There is a list of `publications using pyPESTO <https://pypesto.readthedocs.io/en/latest/references.html>`_.
-If you used pyPESTO in your work, we are happy to include
-your project, please let us know via a GitHub issue.
-
When using pyPESTO in your project, please cite
- Schälte, Y., Fröhlich, F., Jost, P. J., Vanhoefer, J., Pathirana, D., Stapor, P.,
Lakrisenko, P., Wang, D., Raimúndez, E., Merkt, S., Schmiester, L., Städter, P.,
Grein, S., Dudkin, E., Doresic, D., Weindl, D., & Hasenauer, J. (2023). pyPESTO: A
- modular and scalable tool for parameter estimation for dynamic models `arXiv:2305.01821 <https://doi.org/10.48550/arXiv.2305.01821>`_.
+ modular and scalable tool for parameter estimation for dynamic models,
+ Bioinformatics, 2023, btad711, https://doi.org/10.1093/bioinformatics/btad711
When presenting work that employs pyPESTO, feel free to use one of the icons in
`doc/logo/ <https://github.com/ICB-DCM/pyPESTO/tree/main/doc/logo>`_:
@@ -26,3 +22,7 @@ When presenting work that employs pyPESTO, feel free to use one of the icons in
:target: https://raw.githubusercontent.com/ICB-DCM/pyPESTO/master/doc/logo/logo.png
:height: 75
:alt: pyPESTO LOGO
+
+There is a list of `publications using pyPESTO <https://pypesto.readthedocs.io/en/latest/references.html>`_.
+If you used pyPESTO in your work, we are happy to include
+your project; please let us know via a GitHub issue.
diff --git a/doc/references.bib b/doc/references.bib
new file mode 100644
index 000000000..42fd9dd56
--- /dev/null
+++ b/doc/references.bib
@@ -0,0 +1,67 @@
+
+@Article{EgeaBal2009,
+ author = {Egea, Jose A. and Balsa-Canto, Eva and García, María-Sonia G. and Banga, Julio R.},
+ journal = {Industrial & Engineering Chemistry Research},
+ title = {Dynamic Optimization of Nonlinear Processes with an Enhanced Scatter Search Method},
+ year = {2009},
+ issn = {1520-5045},
+ month = apr,
+ number = {9},
+ pages = {4388--4401},
+ volume = {48},
+ creationdate = {2023-11-21T15:56:38},
+ doi = {10.1021/ie801717t},
+ modificationdate = {2023-11-21T16:27:59},
+ publisher = {American Chemical Society (ACS)},
+}
+
+@Article{EgeaMar2010,
+ author = {Jose A. Egea and Rafael Martí and Julio R. Banga},
+ journal = {Computers & Operations Research},
+ title = {An evolutionary method for complex-process optimization},
+ year = {2010},
+ issn = {0305-0548},
+ number = {2},
+ pages = {315-324},
+ volume = {37},
+ abstract = {In this paper we present a new evolutionary method for complex-process optimization. It is partially based on the principles of the scatter search methodology, but it makes use of innovative strategies to be more effective in the context of complex-process optimization using a small number of tuning parameters. In particular, we introduce a new combination method based on path relinking, which considers a broader area around the population members than previous combination methods. We also use a population-update method which improves the balance between intensification and diversification. New strategies to intensify the search and to escape from suboptimal solutions are also presented. The application of the proposed evolutionary algorithm to different sets of both state-of-the-art continuous global optimization and complex-process optimization problems reveals that it is robust and efficient for the type of problems intended to solve, outperforming the results obtained with other methods found in the literature.},
+ creationdate = {2023-11-21T15:57:20},
+ doi = {10.1016/j.cor.2009.05.003},
+ keywords = {Evolutionary algorithms, Complex-process optimization, Continuous optimization, Global optimization, Metaheuristics},
+ modificationdate = {2023-11-21T15:57:20},
+ url = {https://www.sciencedirect.com/science/article/pii/S0305054809001440},
+}
+
+
+@Article{VillaverdeEge2012,
+ author = {Villaverde, Alejandro F and Egea, Jose A and Banga, Julio R},
+ journal = {BMC Systems Biology},
+ title = {A cooperative strategy for parameter estimation in large scale systems biology models},
+ year = {2012},
+ issn = {1752-0509},
+ month = jun,
+ number = {1},
+ volume = {6},
+ creationdate = {2023-11-21T15:57:46},
+ doi = {10.1186/1752-0509-6-75},
+ modificationdate = {2023-11-21T15:57:46},
+ publisher = {Springer Science and Business Media LLC},
+}
+
+
+@Article{PenasGon2017,
+ author = {Penas, David R. and González, Patricia and Egea, Jose A. and Doallo, Ramón and Banga, Julio R.},
+ journal = {BMC Bioinformatics},
+ title = {Parameter estimation in large-scale systems biology models: a parallel and self-adaptive cooperative strategy},
+ year = {2017},
+ issn = {1471-2105},
+ month = jan,
+ number = {1},
+ volume = {18},
+ creationdate = {2023-11-21T15:57:58},
+ doi = {10.1186/s12859-016-1452-4},
+ modificationdate = {2023-11-21T15:57:58},
+ publisher = {Springer Science and Business Media LLC},
+}
+
+@Comment{jabref-meta: databaseType:bibtex;}
diff --git a/pypesto/engine/base.py b/pypesto/engine/base.py
index f00ce12c3..1da3f09d2 100644
--- a/pypesto/engine/base.py
+++ b/pypesto/engine/base.py
@@ -1,6 +1,6 @@
"""Abstract engine base class."""
import abc
-from typing import Any, List
+from typing import Any
from .task import Task
@@ -13,8 +13,8 @@ def __init__(self):
@abc.abstractmethod
def execute(
- self, tasks: List[Task], progress_bar: bool = True
- ) -> List[Any]:
+ self, tasks: list[Task], progress_bar: bool = True
+ ) -> list[Any]:
"""Execute tasks.
Parameters
@@ -22,6 +22,6 @@ def execute(
tasks:
List of tasks to execute.
progress_bar:
- Whether to display a progress bar.
+ Whether to display a progress bar. Defaults to ``True``.
"""
raise NotImplementedError("This engine is not intended to be called.")
diff --git a/pypesto/engine/mpi_pool.py b/pypesto/engine/mpi_pool.py
index 093d2b525..3027db5a4 100644
--- a/pypesto/engine/mpi_pool.py
+++ b/pypesto/engine/mpi_pool.py
@@ -1,6 +1,6 @@
"""Engines with multi-node parallelization."""
import logging
-from typing import Any, List
+from typing import Any
import cloudpickle as pickle
from mpi4py import MPI
@@ -32,17 +32,21 @@ def __init__(self):
super().__init__()
def execute(
- self, tasks: List[Task], progress_bar: bool = True
- ) -> List[Any]:
+ self, tasks: list[Task], progress_bar: bool = True
+ ) -> list[Any]:
"""
Pickle tasks and distribute work to workers.
Parameters
----------
tasks:
- List of tasks to execute.
+ List of :class:`pypesto.engine.Task` to execute.
progress_bar:
- Whether to display a progress bar.
+ Whether to display a progress bar. Defaults to ``True``.
+
+ Returns
+ -------
+ A list of results.
"""
pickled_tasks = [pickle.dumps(task) for task in tasks]
diff --git a/pypesto/engine/multi_process.py b/pypesto/engine/multi_process.py
index ea050647a..dbd454886 100644
--- a/pypesto/engine/multi_process.py
+++ b/pypesto/engine/multi_process.py
@@ -2,7 +2,7 @@
import logging
import multiprocessing
import os
-from typing import Any, List
+from typing import Any, Union
import cloudpickle as pickle
from tqdm import tqdm
@@ -30,13 +30,17 @@ class MultiProcessEngine(Engine):
Defaults to the number of CPUs available on the system according to
`os.cpu_count()`.
The effectively used number of processes will be the minimum of
- `n_procs` and the number of tasks submitted.
+ `n_procs` and the number of tasks submitted. Defaults to ``None``.
method:
Start method, any of "fork", "spawn", "forkserver", or None,
- giving the system specific default context.
+ giving the system specific default context. Defaults to ``None``.
"""
- def __init__(self, n_procs: int = None, method: str = None):
+ def __init__(
+ self,
+ n_procs: Union[int, None] = None,
+ method: Union[str, None] = None,
+ ):
super().__init__()
if n_procs is None:
@@ -48,16 +52,20 @@ def __init__(self, n_procs: int = None, method: str = None):
self.method: str = method
def execute(
- self, tasks: List[Task], progress_bar: bool = True
- ) -> List[Any]:
+ self, tasks: list[Task], progress_bar: bool = True
+ ) -> list[Any]:
"""Pickle tasks and distribute work over parallel processes.
Parameters
----------
tasks:
- List of tasks to execute.
+ List of :class:`pypesto.engine.Task` to execute.
progress_bar:
- Whether to display a progress bar.
+ Whether to display a progress bar. Defaults to ``True``.
+
+ Returns
+ -------
+ A list of results.
"""
n_tasks = len(tasks)
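The `method` argument above selects the multiprocessing start method. A minimal usage sketch, assuming the public import path `pypesto.engine.MultiProcessEngine`:

```python
from pypesto.engine import MultiProcessEngine

# "spawn" starts fresh interpreter processes; "fork" and "forkserver" are
# POSIX-only. None picks the system-specific default context.
engine = MultiProcessEngine(n_procs=4, method="spawn")
```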
diff --git a/pypesto/engine/multi_thread.py b/pypesto/engine/multi_thread.py
index 7c0123fb5..25f48354b 100644
--- a/pypesto/engine/multi_thread.py
+++ b/pypesto/engine/multi_thread.py
@@ -3,7 +3,7 @@
import logging
import os
from concurrent.futures import ThreadPoolExecutor
-from typing import Any, List
+from typing import Any, Union
from tqdm import tqdm
@@ -14,7 +14,7 @@
def work(task):
- """Just execute task."""
+ """Execute task."""
return task.execute()
@@ -32,7 +32,7 @@ class MultiThreadEngine(Engine):
`n_threads` and the number of tasks submitted.
"""
- def __init__(self, n_threads: int = None):
+ def __init__(self, n_threads: Union[int, None] = None):
super().__init__()
if n_threads is None:
@@ -43,8 +43,8 @@ def __init__(self, n_threads: int = None):
self.n_threads: int = n_threads
def execute(
- self, tasks: List[Task], progress_bar: bool = True
- ) -> List[Any]:
+ self, tasks: list[Task], progress_bar: bool = True
+ ) -> list[Any]:
"""Deepcopy tasks and distribute work over parallel threads.
Parameters
@@ -53,6 +53,10 @@ def execute(
List of tasks to execute.
progress_bar:
Whether to display a progress bar.
+
+ Returns
+ -------
+ A list of results.
"""
n_tasks = len(tasks)
diff --git a/pypesto/engine/single_core.py b/pypesto/engine/single_core.py
index 75c1cf0d0..d176bd270 100644
--- a/pypesto/engine/single_core.py
+++ b/pypesto/engine/single_core.py
@@ -1,5 +1,5 @@
"""Engines without parallelization."""
-from typing import Any, List
+from typing import Any
from tqdm import tqdm
@@ -11,15 +11,15 @@ class SingleCoreEngine(Engine):
"""
Dummy engine for sequential execution on one core.
- Note that the objective itself may be multithreaded.
+ .. note:: The objective itself may be multithreaded.
"""
def __init__(self):
super().__init__()
def execute(
- self, tasks: List[Task], progress_bar: bool = True
- ) -> List[Any]:
+ self, tasks: list[Task], progress_bar: bool = True
+ ) -> list[Any]:
"""Execute all tasks in a simple for loop sequentially.
Parameters
@@ -28,6 +28,10 @@ def execute(
List of tasks to execute.
progress_bar:
Whether to display a progress bar.
+
+ Returns
+ -------
+ A list of results.
"""
results = []
for task in tqdm(tasks, disable=not progress_bar):
diff --git a/pypesto/engine/task.py b/pypesto/engine/task.py
index 9ba094db7..f48fdcc6a 100644
--- a/pypesto/engine/task.py
+++ b/pypesto/engine/task.py
@@ -8,7 +8,7 @@ class Task(abc.ABC):
Abstract Task class.
A task is one of a list of independent execution tasks that are
- submitted to the execution engine to be executed using the execute()
+    submitted to the execution engine to be executed using the :meth:`execute`
method, commonly in parallel.
"""
diff --git a/pypesto/ensemble/ensemble.py b/pypesto/ensemble/ensemble.py
index d3bd07687..2310794c0 100644
--- a/pypesto/ensemble/ensemble.py
+++ b/pypesto/ensemble/ensemble.py
@@ -669,7 +669,12 @@ def from_optimization_endpoints(
# add the parameters from the next start as long as we
# did not reach maximum size and the next value is still
# lower than the cutoff value
- if start['fval'] <= abs_cutoff and len(x_vectors) < max_size:
+ if (
+ start['fval'] <= abs_cutoff
+ and len(x_vectors) < max_size
+ # 'x' can be None if optimization failed at the startpoint
+ and start['x'] is not None
+ ):
x_vectors.append(start['x'][result.problem.x_free_indices])
# the vector tag will be a -1 to indicate it is the last step
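The guard added above can be read as a single predicate; a standalone sketch (reimplemented here, not the pyPESTO internals):

```python
def keep_start(start: dict, n_kept: int, abs_cutoff: float, max_size: int) -> bool:
    """Decide whether an optimizer start enters the ensemble."""
    return (
        start["fval"] <= abs_cutoff   # objective passes the cutoff
        and n_kept < max_size         # ensemble is not full yet
        and start["x"] is not None    # optimization did not fail at the startpoint
    )
```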
diff --git a/pypesto/history/base.py b/pypesto/history/base.py
index 41eab3cad..27f5ef455 100644
--- a/pypesto/history/base.py
+++ b/pypesto/history/base.py
@@ -3,7 +3,7 @@
import numbers
import time
from abc import ABC, abstractmethod
-from typing import Dict, Sequence, Tuple, Union
+from typing import Sequence, Union
import numpy as np
@@ -38,7 +38,7 @@ class HistoryBase(ABC):
# all possible history entries
ALL_KEYS = (X, *RESULT_KEYS, TIME)
- def __init__(self, options: HistoryOptions = None):
+ def __init__(self, options: Union[HistoryOptions, None] = None):
if options is None:
options = HistoryOptions()
options = HistoryOptions.assert_instance(options)
@@ -48,7 +48,7 @@ def __init__(self, options: HistoryOptions = None):
def update(
self,
x: np.ndarray,
- sensi_orders: Tuple[int, ...],
+ sensi_orders: tuple[int, ...],
mode: ModeType,
result: ResultDict,
) -> None:
@@ -70,8 +70,8 @@ def update(
def finalize(
self,
- message: str = None,
- exitflag: str = None,
+ message: Union[str, None] = None,
+ exitflag: Union[str, None] = None,
) -> None:
"""
Finalize history. Called after a run. Default: Do nothing.
@@ -281,7 +281,7 @@ class NoHistory(HistoryBase):
def update( # noqa: D102
self,
x: np.ndarray,
- sensi_orders: Tuple[int, ...],
+ sensi_orders: tuple[int, ...],
mode: ModeType,
result: ResultDict,
) -> None:
@@ -364,7 +364,7 @@ class CountHistoryBase(HistoryBase):
Needs a separate implementation of trace.
"""
- def __init__(self, options: Union[HistoryOptions, Dict] = None):
+ def __init__(self, options: Union[HistoryOptions, dict] = None):
super().__init__(options)
self._n_fval: int = 0
self._n_grad: int = 0
@@ -378,7 +378,7 @@ def __init__(self, options: Union[HistoryOptions, Dict] = None):
def update( # noqa: D102
self,
x: np.ndarray,
- sensi_orders: Tuple[int, ...],
+ sensi_orders: tuple[int, ...],
mode: ModeType,
result: ResultDict,
) -> None:
@@ -386,7 +386,7 @@ def update( # noqa: D102
def _update_counts(
self,
- sensi_orders: Tuple[int, ...],
+ sensi_orders: tuple[int, ...],
mode: ModeType,
):
"""Update the counters."""
@@ -499,8 +499,7 @@ def add_fun_from_res(result: ResultDict) -> ResultDict:
Returns
-------
- full_result:
- Result dicionary, adding whatever is possible to calculate.
+ Result dictionary, adding whatever is possible to calculate.
"""
result = result.copy()
@@ -529,8 +528,7 @@ def reduce_result_via_options(
Returns
-------
- result:
- Result reduced to what is intended to be stored in history.
+ Result reduced to what is intended to be stored in history.
"""
result = result.copy()
diff --git a/pypesto/history/csv.py b/pypesto/history/csv.py
index b7acd7f09..07df30fbf 100644
--- a/pypesto/history/csv.py
+++ b/pypesto/history/csv.py
@@ -3,7 +3,7 @@
import copy
import os
import time
-from typing import Dict, List, Sequence, Tuple, Union
+from typing import Sequence, Union
import numpy as np
import pandas as pd
@@ -41,14 +41,14 @@ class CsvHistory(CountHistoryBase):
options:
History options.
load_from_file:
- If True, history will be initialized from data in the specified file
+ If True, history will be initialized from data in the specified file.
"""
def __init__(
self,
file: str,
x_names: Sequence[str] = None,
- options: Union[HistoryOptions, Dict] = None,
+ options: Union[HistoryOptions, dict] = None,
load_from_file: bool = False,
):
super().__init__(options=options)
@@ -87,16 +87,16 @@ def _update_counts_from_trace(self) -> None:
def update(
self,
x: np.ndarray,
- sensi_orders: Tuple[int, ...],
+ sensi_orders: tuple[int, ...],
mode: ModeType,
result: ResultDict,
) -> None:
- """See `History` docstring."""
+ """See :meth:`HistoryBase.update`."""
super().update(x, sensi_orders, mode, result)
self._update_trace(x, mode, result)
def finalize(self, message: str = None, exitflag: str = None):
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.finalize`."""
super().finalize(message=message, exitflag=exitflag)
self._save_trace(finalize=True)
@@ -167,7 +167,7 @@ def _init_trace(self, x: np.ndarray):
if self.x_names is None:
self.x_names = [f'x{i}' for i, _ in enumerate(x)]
- columns: List[Tuple] = [
+ columns: list[tuple] = [
(c, np.nan)
for c in [
TIME,
@@ -213,7 +213,7 @@ def _init_trace(self, x: np.ndarray):
def _save_trace(self, finalize: bool = False):
"""
- Save to file via pd.DataFrame.to_csv().
+ Save to file via :meth:`pandas.DataFrame.to_csv`.
Only done, if `self.storage_file` is not None and other conditions.
apply.
@@ -243,49 +243,49 @@ def get_x_trace(
ix: Union[int, Sequence[int], None] = None,
trim: bool = False,
) -> Union[Sequence[np.ndarray], np.ndarray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_x_trace`."""
return list(self._trace[X].values[ix])
@trace_wrap
def get_fval_trace(
self, ix: Union[int, Sequence[int], None], trim: bool = False
) -> Union[Sequence[float], float]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_fval_trace`."""
return list(self._trace[(FVAL, np.nan)].values[ix])
@trace_wrap
def get_grad_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_grad_trace`."""
return list(self._trace[GRAD].values[ix])
@trace_wrap
def get_hess_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_hess_trace`."""
return list(self._trace[(HESS, np.nan)].values[ix])
@trace_wrap
def get_res_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_res_trace`."""
return list(self._trace[(RES, np.nan)].values[ix])
@trace_wrap
def get_sres_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_sres_trace`."""
return list(self._trace[(SRES, np.nan)].values[ix])
@trace_wrap
def get_time_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[float], float]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_time_trace`."""
return list(self._trace[(TIME, np.nan)].values[ix])
@@ -301,7 +301,7 @@ def ndarray2string_full(x: Union[np.ndarray, None]) -> Union[str, None]:
Returns
-------
- x: array as string.
+ Array as string.
"""
if not isinstance(x, np.ndarray):
return x
@@ -320,7 +320,7 @@ def string2ndarray(x: Union[str, float]) -> Union[np.ndarray, float]:
Returns
-------
- x: array as np.ndarray.
+ Array as :class:`numpy.ndarray`.
"""
if not isinstance(x, str):
return x
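A hedged round-trip sketch for the two helpers above, assuming they remain importable from `pypesto.history.csv` and that `ndarray2string_full` preserves full precision:

```python
import numpy as np

from pypesto.history.csv import ndarray2string_full, string2ndarray

x = np.array([1.0, 2.5, -3.0])
s = ndarray2string_full(x)  # serialize for a CSV cell
y = string2ndarray(s)       # parse back when loading the trace
assert np.allclose(x, y)
```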
diff --git a/pypesto/history/generate.py b/pypesto/history/generate.py
index 3c24fdfdc..5fdbbfe4c 100644
--- a/pypesto/history/generate.py
+++ b/pypesto/history/generate.py
@@ -30,8 +30,7 @@ def create_history(
Returns
-------
- history:
- A history object corresponding to the inputs.
+ A history object corresponding to the inputs.
"""
# create different history types based on the inputs
if options.storage_file is None:
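`create_history` dispatches on `options.storage_file`; a sketch of the three cases, assuming the signature `create_history(id, x_names, options)`:

```python
from pypesto.history import HistoryOptions, create_history

# No storage file: an in-memory (or no-op) history, depending on trace_record.
h0 = create_history(id="0", x_names=["p1", "p2"], options=HistoryOptions())

# CSV suffix: a CsvHistory; "{id}" is replaced by the history id.
h1 = create_history(
    id="1",
    x_names=["p1", "p2"],
    options=HistoryOptions(trace_record=True, storage_file="trace_{id}.csv"),
)

# HDF5 suffix: an Hdf5History.
h2 = create_history(
    id="2",
    x_names=["p1", "p2"],
    options=HistoryOptions(trace_record=True, storage_file="trace_{id}.hdf5"),
)
```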
diff --git a/pypesto/history/hdf5.py b/pypesto/history/hdf5.py
index e79634821..350b8de6e 100644
--- a/pypesto/history/hdf5.py
+++ b/pypesto/history/hdf5.py
@@ -3,7 +3,9 @@
import contextlib
import time
-from typing import Dict, Sequence, Tuple, Union
+from functools import wraps
+from pathlib import Path
+from typing import Sequence, Union
import h5py
import numpy as np
@@ -53,6 +55,7 @@ def with_h5_file(mode: str):
raise ValueError(f"Mode must be one of {modes}")
def decorator(fun):
+ @wraps(fun)
def wrapper(self, *args, **kwargs):
# file already opened
if self._f is not None and (
@@ -74,8 +77,9 @@ def wrapper(self, *args, **kwargs):
def check_editable(fun):
- """Check if the history is editable."""
+ """Warp function to check whether the history is editable."""
+ @wraps(fun)
def wrapper(self, *args, **kwargs):
if not self.editable:
raise ValueError(
@@ -98,18 +102,18 @@ class Hdf5History(HistoryBase):
file:
HDF5 file name.
options:
- History options.
+ History options. Defaults to ``None``.
"""
def __init__(
self,
id: str,
- file: str,
- options: Union[HistoryOptions, Dict] = None,
+ file: Union[str, Path],
+ options: Union[HistoryOptions, dict, None] = None,
):
super().__init__(options=options)
self.id: str = id
- self.file: str = file
+ self.file: str = str(file)
# filled during file access
self._f: Union[h5py.File, None] = None
@@ -122,11 +126,11 @@ def __init__(
def update(
self,
x: np.ndarray,
- sensi_orders: Tuple[int, ...],
+ sensi_orders: tuple[int, ...],
mode: ModeType,
result: ResultDict,
) -> None:
- """See `History` docstring."""
+ """See :meth:`HistoryBase.update`."""
# check whether the file was marked as editable upon initialization
super().update(x, sensi_orders, mode, result)
self._update_counts(sensi_orders, mode)
@@ -135,14 +139,11 @@ def update(
@with_h5_file("a")
@check_editable
def finalize(self, message: str = None, exitflag: str = None) -> None:
- """See `HistoryBase` docstring."""
+ """See :class:`HistoryBase.finalize`."""
super().finalize()
# add message and exitflag to trace
- f = self._f
- if f'{HISTORY}/{self.id}/{MESSAGES}/' not in f:
- f.create_group(f'{HISTORY}/{self.id}/{MESSAGES}/')
- grp = f[f'{HISTORY}/{self.id}/{MESSAGES}/']
+ grp = self._f.require_group(f'{HISTORY}/{self.id}/{MESSAGES}/')
if message is not None:
grp.attrs[MESSAGE] = message
if exitflag is not None:
@@ -150,7 +151,7 @@ def finalize(self, message: str = None, exitflag: str = None) -> None:
@staticmethod
def load(
- id: str, file: str, options: Union[HistoryOptions, Dict] = None
+ id: str, file: str, options: Union[HistoryOptions, dict] = None
) -> 'Hdf5History':
"""Load the History object from memory."""
history = Hdf5History(id=id, file=file, options=options)
@@ -192,8 +193,8 @@ def _has_non_nan_entries(self, hdf5_group: str) -> bool:
return False
@with_h5_file("a")
- def _update_counts(self, sensi_orders: Tuple[int, ...], mode: ModeType):
- """Update the counters in the hdf5."""
+ def _update_counts(self, sensi_orders: tuple[int, ...], mode: ModeType):
+ """Update the counters in the hdf5 file."""
group = self._require_group()
if mode == MODE_FUN:
@@ -220,7 +221,7 @@ def __len__(self) -> int:
@property
@with_h5_file("r")
def n_fval(self) -> int:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.n_fval`."""
try:
return self._get_group().attrs[N_FVAL]
except KeyError:
@@ -229,7 +230,7 @@ def n_fval(self) -> int:
@property
@with_h5_file("r")
def n_grad(self) -> int:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.n_grad`."""
try:
return self._get_group().attrs[N_GRAD]
except KeyError:
@@ -238,7 +239,7 @@ def n_grad(self) -> int:
@property
@with_h5_file("r")
def n_hess(self) -> int:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.n_hess`."""
try:
return self._get_group().attrs[N_HESS]
except KeyError:
@@ -247,7 +248,7 @@ def n_hess(self) -> int:
@property
@with_h5_file("r")
def n_res(self) -> int:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.n_res`."""
try:
return self._get_group().attrs[N_RES]
except KeyError:
@@ -256,7 +257,7 @@ def n_res(self) -> int:
@property
@with_h5_file("r")
def n_sres(self) -> int:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.n_sres`."""
try:
return self._get_group().attrs[N_SRES]
except KeyError:
@@ -274,7 +275,7 @@ def trace_save_iter(self) -> int:
@property
@with_h5_file("r")
def start_time(self) -> float:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.start_time`."""
# TODO Y This should also be saved in and recovered from the hdf5 file
try:
return self._get_group().attrs[START_TIME]
@@ -303,7 +304,7 @@ def exitflag(self) -> str:
def _update_trace(
self,
x: np.ndarray,
- sensi_orders: Tuple[int],
+        sensi_orders: tuple[int, ...],
mode: ModeType,
result: ResultDict,
) -> None:
@@ -376,7 +377,7 @@ def _get_hdf5_entries(
The key whose trace is returned.
ix:
Index or list of indices of the iterations that will produce
- the trace.
+ the trace. Defaults to ``None``.
Returns
-------
@@ -405,60 +406,55 @@ def _get_hdf5_entries(
def get_x_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[np.ndarray], np.ndarray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_x_trace`."""
return self._get_hdf5_entries(X, ix)
@trace_wrap
def get_fval_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[float], float]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_fval_trace`."""
return self._get_hdf5_entries(FVAL, ix)
@trace_wrap
def get_grad_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_grad_trace`."""
return self._get_hdf5_entries(GRAD, ix)
@trace_wrap
def get_hess_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_hess_trace`."""
return self._get_hdf5_entries(HESS, ix)
@trace_wrap
def get_res_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_res_trace`."""
return self._get_hdf5_entries(RES, ix)
@trace_wrap
def get_sres_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_sres_trace`."""
return self._get_hdf5_entries(SRES, ix)
@trace_wrap
def get_time_trace(
self, ix: Union[int, Sequence[int], None] = None, trim: bool = False
) -> Union[Sequence[float], float]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_time_trace`."""
return self._get_hdf5_entries(TIME, ix)
def _editable(self) -> bool:
"""
Check whether the id is already existent in the file.
- Parameters
- ----------
- file:
- HDF5 file name.
-
Returns
-------
True if the file is editable, False otherwise.
@@ -472,3 +468,73 @@ def _editable(self) -> bool:
except OSError:
# if something goes wrong, we assume the file is not editable
return False
+
+ @staticmethod
+ def from_history(
+ other: HistoryBase,
+ file: Union[str, Path],
+ id_: str,
+ overwrite: bool = False,
+ ) -> "Hdf5History":
+ """Write some History to HDF5.
+
+ Parameters
+ ----------
+ other:
+ History to be copied to HDF5.
+ file:
+ HDF5 file to write to (append or create).
+ id_:
+ ID of the history.
+ overwrite:
+ Whether to overwrite an existing history with the same id.
+ Defaults to ``False``.
+
+ Returns
+ -------
+ The newly created :class:`Hdf5History`.
+ """
+ history = Hdf5History(file=file, id=id_)
+ history._f = h5py.File(history.file, mode="a")
+
+ try:
+ if f"{HISTORY}/{history.id}" in history._f:
+ if overwrite:
+ del history._f[f"{HISTORY}/{history.id}"]
+ else:
+ raise RuntimeError(
+ f"ID {history.id} already exists in file {file}."
+ )
+
+ trace_group = history._require_group()
+ trace_group.attrs[N_FVAL] = other.n_fval
+ trace_group.attrs[N_GRAD] = other.n_grad
+ trace_group.attrs[N_HESS] = other.n_hess
+ trace_group.attrs[N_RES] = other.n_res
+ trace_group.attrs[N_SRES] = other.n_sres
+ trace_group.attrs[START_TIME] = other.start_time
+ trace_group.attrs[N_ITERATIONS] = (
+ len(other.get_time_trace()) if other.implements_trace() else 0
+ )
+
+ group = trace_group.parent.require_group(MESSAGES)
+ if other.message is not None:
+ group.attrs[MESSAGE] = other.message
+ if other.exitflag is not None:
+ group.attrs[EXITFLAG] = other.exitflag
+
+ if not other.implements_trace():
+ return history
+
+ for trace_key in (X, FVAL, GRAD, HESS, RES, SRES, TIME):
+ getter = getattr(other, f"get_{trace_key}_trace")
+ trace = getter()
+ for iteration, value in enumerate(trace):
+ trace_group.require_group(str(iteration))[
+ trace_key
+ ] = value
+ finally:
+ history._f.close()
+ history._f = None
+
+ return history
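Usage sketch for the `from_history` method added above (argument names as defined in the diff; the `MemoryHistory` import path is assumed):

```python
from pypesto.history import Hdf5History, MemoryHistory

mem_history = MemoryHistory()
# ... mem_history.update(...) is called repeatedly during an optimization ...

# Copy counters, messages, and (if available) the full trace to HDF5;
# overwrite=True replaces an existing history with the same id.
h5_history = Hdf5History.from_history(
    mem_history, file="result.hdf5", id_="0", overwrite=True
)
```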
diff --git a/pypesto/history/memory.py b/pypesto/history/memory.py
index 5772c127a..5e61ec825 100644
--- a/pypesto/history/memory.py
+++ b/pypesto/history/memory.py
@@ -1,7 +1,7 @@
"""In-memory history."""
import time
-from typing import Any, Dict, Sequence, Tuple, Union
+from typing import Any, Sequence, Union
import numpy as np
@@ -26,21 +26,22 @@ class MemoryHistory(CountHistoryBase):
Parameters
----------
options:
- History options.
+        History options, see :class:`pypesto.history.HistoryOptions`. Defaults
+        to ``None``, which implies default options.
"""
- def __init__(self, options: Union[HistoryOptions, Dict] = None):
+ def __init__(self, options: Union[HistoryOptions, dict, None] = None):
super().__init__(options=options)
- self._trace: Dict[str, Any] = {key: [] for key in HistoryBase.ALL_KEYS}
+ self._trace: dict[str, Any] = {key: [] for key in HistoryBase.ALL_KEYS}
def update(
self,
x: np.ndarray,
- sensi_orders: Tuple[int, ...],
+ sensi_orders: tuple[int, ...],
mode: ModeType,
result: ResultDict,
) -> None:
- """See `History` docstring."""
+ """See :meth:`HistoryBase.update`."""
super().update(x, sensi_orders, mode, result)
self._update_trace(x, mode, result)
@@ -70,7 +71,7 @@ def get_x_trace(
ix: Union[int, Sequence[int], None] = None,
trim: bool = False,
) -> Union[Sequence[np.ndarray], np.ndarray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_x_trace`."""
return [self._trace[X][i] for i in ix]
@trace_wrap
@@ -79,7 +80,7 @@ def get_fval_trace(
ix: Union[int, Sequence[int], None] = None,
trim: bool = False,
) -> Union[Sequence[float], float]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_fval_trace`."""
return [self._trace[FVAL][i] for i in ix]
@trace_wrap
@@ -88,7 +89,7 @@ def get_grad_trace(
ix: Union[int, Sequence[int], None] = None,
trim: bool = False,
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_grad_trace`."""
return [self._trace[GRAD][i] for i in ix]
@trace_wrap
@@ -97,7 +98,7 @@ def get_hess_trace(
ix: Union[int, Sequence[int], None] = None,
trim: bool = False,
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_hess_trace`."""
return [self._trace[HESS][i] for i in ix]
@trace_wrap
@@ -106,7 +107,7 @@ def get_res_trace(
ix: Union[int, Sequence[int], None] = None,
trim: bool = False,
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_res_trace`."""
return [self._trace[RES][i] for i in ix]
@trace_wrap
@@ -115,7 +116,7 @@ def get_sres_trace(
ix: Union[int, Sequence[int], None] = None,
trim: bool = False,
) -> Union[Sequence[MaybeArray], MaybeArray]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_sres_trace`."""
return [self._trace[SRES][i] for i in ix]
@trace_wrap
@@ -124,5 +125,5 @@ def get_time_trace(
ix: Union[int, Sequence[int], None] = None,
trim: bool = False,
) -> Union[Sequence[float], float]:
- """See `HistoryBase` docstring."""
+ """See :meth:`HistoryBase.get_time_trace`."""
return [self._trace[TIME][i] for i in ix]
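A minimal recording sketch for `MemoryHistory`, assuming `MODE_FUN` from `pypesto.C` and that a bare `fval` entry is a valid `ResultDict`:

```python
import numpy as np

from pypesto.C import MODE_FUN
from pypesto.history import MemoryHistory

history = MemoryHistory()
history.update(
    x=np.array([1.0, 2.0]),
    sensi_orders=(0,),    # only the function value was evaluated
    mode=MODE_FUN,
    result={"fval": 3.5},
)
print(history.get_fval_trace())  # [3.5]
```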
diff --git a/pypesto/history/optimizer.py b/pypesto/history/optimizer.py
index 46524a0c1..7dfd76248 100644
--- a/pypesto/history/optimizer.py
+++ b/pypesto/history/optimizer.py
@@ -1,7 +1,7 @@
"""Track optimal values during an optimization."""
import logging
-from typing import Tuple, Union
+from typing import Union
import numpy as np
@@ -46,7 +46,7 @@ class OptimizerHistory:
Lower and upper bound. Used for checking validity of optimal points.
generate_from_history:
If set to true, this function will try to fill attributes of this
- function based on the provided history.
+        object based on the provided history. Defaults to ``False``.
"""
# optimal point values
@@ -84,25 +84,41 @@ def __init__(
def update(
self,
x: np.ndarray,
- sensi_orders: Tuple[int],
+        sensi_orders: tuple[int, ...],
mode: ModeType,
result: ResultDict,
) -> None:
- """Update history and best found value."""
+ """Update history and best found value.
+
+ Parameters
+ ----------
+ x:
+ Current parameter vector.
+ sensi_orders:
+ Sensitivity orders to be evaluated.
+ mode:
+ Mode of the evaluation.
+ result:
+ Current result.
+ """
result = add_fun_from_res(result)
self._update_vals(x, result)
self.history.update(x, sensi_orders, mode, result)
- def finalize(self, message: str = None, exitflag: int = None):
+ def finalize(
+ self,
+ message: Union[str, None] = None,
+ exitflag: Union[int, None] = None,
+ ):
"""
Finalize history.
Parameters
----------
message:
- Optimizer message to be saved.
+ Optimizer message to be saved. Defaults to ``None``.
exitflag:
- Optimizer exitflag to be saved.
+ Optimizer exitflag to be saved. Defaults to ``None``.
"""
self.history.finalize(message=message, exitflag=exitflag)
@@ -136,7 +152,7 @@ def finalize(self, message: str = None, exitflag: int = None):
# issue a warning, as if this happens, then something may be wrong
logger.warning(
f"History has a better point {fval} than the current best "
- "point {self.fval_min}."
+ f"point {self.fval_min}."
)
# update everything
for key in self.MIN_KEYS:
@@ -189,7 +205,7 @@ def _update_vals(self, x: np.ndarray, result: ResultDict) -> None:
def _maybe_compute_init_and_min_vals_from_trace(self) -> None:
"""Try to set initial and best function value from trace.
- Only possible if history has a trace.
+ .. note:: Only possible if history has a trace.
"""
if not len(self.history):
# nothing to be computed from empty history
@@ -220,7 +236,7 @@ def _admissible(self, x: np.ndarray) -> bool:
Returns
-------
- admissible: Whether the point fulfills the problem requirements.
+ Whether the point fulfills the problem requirements.
"""
return np.all(x <= self.ub) and np.all(x >= self.lb)
diff --git a/pypesto/history/options.py b/pypesto/history/options.py
index 22c07403f..d3373824f 100644
--- a/pypesto/history/options.py
+++ b/pypesto/history/options.py
@@ -1,7 +1,7 @@
"""History options."""
from pathlib import Path
-from typing import Dict, Union
+from typing import Union
from ..C import SUFFIXES, SUFFIXES_CSV
from .util import CsvHistoryTemplateError, HistoryTypeError
@@ -18,25 +18,27 @@ class HistoryOptions(dict):
trace_record:
Flag indicating whether to record the trace of function calls.
The trace_record_* flags only become effective if
- trace_record is True.
+ trace_record is True. Defaults to ``False``.
trace_record_grad:
- Flag indicating whether to record the gradient in the trace.
+ Flag indicating whether to record the gradient in the trace. Defaults
+ to ``True``.
trace_record_hess:
- Flag indicating whether to record the Hessian in the trace.
+ Flag indicating whether to record the Hessian in the trace. Defaults
+ to ``True``.
trace_record_res:
Flag indicating whether to record the residual in
- the trace.
+ the trace. Defaults to ``True``.
trace_record_sres:
Flag indicating whether to record the residual sensitivities in
- the trace.
+ the trace. Defaults to ``True``.
trace_save_iter:
- After how many iterations to store the trace.
+ After how many iterations to store the trace. Defaults to ``10``.
storage_file:
File to save the history to. Can be any of None, a
"{filename}.csv", or a "{filename}.hdf5" file. Depending on the values,
- the `create_history` method creates the appropriate object.
+ the :func:`create_history` method creates the appropriate object.
Occurrences of "{id}" in the file name are replaced by the `id`
- upon creation of a history, if applicable.
+ upon creation of a history, if applicable. Defaults to ``None``.
"""
def __init__(
@@ -47,7 +49,7 @@ def __init__(
trace_record_res: bool = True,
trace_record_sres: bool = True,
trace_save_iter: int = 10,
- storage_file: str = None,
+ storage_file: Union[str, None] = None,
):
super().__init__()
@@ -89,14 +91,14 @@ def _sanity_check(self):
@staticmethod
def assert_instance(
- maybe_options: Union['HistoryOptions', Dict],
+ maybe_options: Union['HistoryOptions', dict],
) -> 'HistoryOptions':
"""
Return a valid options object.
Parameters
----------
- maybe_options: HistoryOptions or dict
+ maybe_options: :class:`HistoryOptions` or dict
"""
if isinstance(maybe_options, HistoryOptions):
return maybe_options
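The defaults documented above combine as follows; a small sketch (note that `trace_record=True` is required for the other `trace_record_*` flags to take effect):

```python
from pypesto.history import HistoryOptions

options = HistoryOptions(
    trace_record=True,        # master switch; without it nothing is traced
    trace_record_grad=True,   # effective only because trace_record is True
    trace_save_iter=10,       # flush to file every 10 iterations
    storage_file="multistart_{id}.csv",  # "{id}" is filled per history
)
```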
diff --git a/pypesto/history/util.py b/pypesto/history/util.py
index 1487e4e4f..8f658962a 100644
--- a/pypesto/history/util.py
+++ b/pypesto/history/util.py
@@ -2,13 +2,13 @@
import numbers
from functools import wraps
-from typing import Dict, Sequence, Union
+from typing import Sequence, Union
import numpy as np
from ..C import SUFFIXES
-ResultDict = Dict[str, Union[float, np.ndarray]]
+ResultDict = dict[str, Union[float, np.ndarray]]
MaybeArray = Union[np.ndarray, 'np.nan']
diff --git a/pypesto/objective/amici/amici.py b/pypesto/objective/amici/amici.py
index d57315e8d..a677abe70 100644
--- a/pypesto/objective/amici/amici.py
+++ b/pypesto/objective/amici/amici.py
@@ -621,5 +621,5 @@ def check_gradients_match_finite_differences(
x = self.amici_object_builder.petab_problem.x_nominal_scaled
x_free = self.amici_object_builder.petab_problem.x_free_indices
return super().check_gradients_match_finite_differences(
- x=x, x_free=x_free, *args, **kwargs
+ *args, x=x, x_free=x_free, **kwargs
)
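The reordering above follows the convention (and lint rule flake8-bugbear B026) that `*args` should not be unpacked after keyword arguments; with positional-or-keyword parameters the old pattern can even fail at call time. A hypothetical illustration, not the pyPESTO API:

```python
def g(x, scale=1.0):  # `x` is positional-or-keyword
    return scale * x


extra = (2.0,)  # meant for `scale`

try:
    g(x=3.0, *extra)  # unpacks to g(2.0, x=3.0)
except TypeError as err:
    print(err)  # g() got multiple values for argument 'x'


def h(*args, x=None):  # keyword-only `x`
    return args, x


h(x=3.0, *extra)  # works here, but linters flag the ordering (B026)
h(*extra, x=3.0)  # the fixed ordering: positionals first, then keywords
```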
diff --git a/pypesto/objective/priors.py b/pypesto/objective/priors.py
index 8f340a330..78ce47262 100644
--- a/pypesto/objective/priors.py
+++ b/pypesto/objective/priors.py
@@ -1,3 +1,5 @@
+import logging
+import math
from copy import deepcopy
from typing import Callable, Dict, List, Sequence, Tuple, Union
@@ -8,6 +10,8 @@
from .base import ResultDict
from .function import ObjectiveBase
+logger = logging.getLogger(__name__)
+
class NegLogPriors(AggregatedObjective):
"""
@@ -490,6 +494,11 @@ def res(x):
return np.sqrt(abs(x - mean) / scale)
def d_res_dx(x):
+ if x == mean:
+ logger.warning(
+ "x == mean in d_res_dx of Laplace prior. Returning NaN."
+ )
+ return math.nan
return 1 / 2 * (x - mean) / np.sqrt(scale * abs(x - mean) ** 3)
return log_f, d_log_f_dx, dd_log_f_ddx, res, d_res_dx
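A standalone numeric check of the guarded derivative above, with `mean = 0` and `scale = 1` (reimplemented here, not imported from pyPESTO):

```python
import math

mean, scale = 0.0, 1.0


def d_res_dx(x):
    # res(x) = sqrt(|x - mean| / scale) is not differentiable at x == mean.
    if x == mean:
        return math.nan
    return 0.5 * (x - mean) / math.sqrt(scale * abs(x - mean) ** 3)


print(d_res_dx(1.0))  # 0.5
print(d_res_dx(0.0))  # nan
```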
diff --git a/pypesto/optimize/__init__.py b/pypesto/optimize/__init__.py
index 7683a935c..4845aa9dd 100644
--- a/pypesto/optimize/__init__.py
+++ b/pypesto/optimize/__init__.py
@@ -6,7 +6,13 @@
Multistart optimization with support for various optimizers.
"""
-from .ess import CESSOptimizer, ESSOptimizer, SacessOptimizer
+from .ess import (
+ CESSOptimizer,
+ ESSOptimizer,
+ SacessFidesFactory,
+ SacessOptimizer,
+ get_default_ess_options,
+)
from .load import (
fill_result_from_history,
optimization_result_from_history,
diff --git a/pypesto/optimize/ess/__init__.py b/pypesto/optimize/ess/__init__.py
index fe4de3fe3..3962f5118 100644
--- a/pypesto/optimize/ess/__init__.py
+++ b/pypesto/optimize/ess/__init__.py
@@ -7,4 +7,8 @@
FunctionEvaluatorMT,
)
from .refset import RefSet
-from .sacess import SacessOptimizer, get_default_ess_options
+from .sacess import (
+ SacessFidesFactory,
+ SacessOptimizer,
+ get_default_ess_options,
+)
diff --git a/pypesto/optimize/ess/cess.py b/pypesto/optimize/ess/cess.py
index 225d8bf6f..0195f95a4 100644
--- a/pypesto/optimize/ess/cess.py
+++ b/pypesto/optimize/ess/cess.py
@@ -23,25 +23,21 @@ class CESSOptimizer:
r"""
Cooperative Enhanced Scatter Search Optimizer (CESS).
- A cooperative scatter search algorithm based on [VillaverdeEge2012]_.
+ A cooperative scatter search algorithm based on :footcite:t:`VillaverdeEge2012`.
In short, multiple scatter search instances with different hyperparameters
are running in different threads/processes, and exchange information.
Some instances focus on diversification while others focus on
intensification. Communication happens at fixed time intervals.
- Proposed hyperparameter values in [VillaverdeEge2012]_:
+ Proposed hyperparameter values in :footcite:t:`VillaverdeEge2012`:
* ``dim_refset``: ``[0.5 n_parameter, 20 n_parameters]``
* ``local_n2``: ``[0, 100]``
* ``balance``: ``[0, 0.5]``
* ``n_diverse``: ``[5 n_par, 20 n_par]``
- * ``max_eval``: such that :math:`\tau = log10(max_eval / n_par)` is in
- [2.5, 3.5], with a recommended default value of 2.5.
-
- .. [VillaverdeEge2012] 'A cooperative strategy for parameter estimation in
- large scale systems biology models', Villaverde, A.F., Egea,
- J.A. & Banga, J.R. BMC Syst Biol 2012, 6, 75.
- https://doi.org/10.1186/1752-0509-6-75
+ * ``max_eval``: such that
+      :math:`\tau = \log_{10}(max\_eval / n\_par) \in [2.5, 3.5]`
+ with a recommended default value of :math:`\tau = 2.5`.
Attributes
----------
@@ -65,6 +61,10 @@ class CESSOptimizer:
Starting time of the most recent optimization.
i_iter:
Current iteration number.
+
+ References
+ ----------
+ .. footbibliography::
"""
def __init__(
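The `max_eval` recommendation above can be inverted to pick an evaluation budget from :math:`\tau`; a small sketch:

```python
# tau = log10(max_eval / n_par), recommended range [2.5, 3.5], default 2.5.
n_par = 20
tau = 2.5
max_eval = round(n_par * 10**tau)  # ~6325 evaluations for 20 parameters
```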
diff --git a/pypesto/optimize/ess/ess.py b/pypesto/optimize/ess/ess.py
index 4323e207f..4eded5069 100644
--- a/pypesto/optimize/ess/ess.py
+++ b/pypesto/optimize/ess/ess.py
@@ -1,28 +1,7 @@
"""Enhanced Scatter Search.
-See papers on ESS [EgeaBal2009]_ [EgeaMar2010]_, CESS [VillaverdeEge2012]_ and
-saCeSS [PenasGon2017]_.
-
-References
-==========
-
-.. [EgeaBal2009] 'Dynamic Optimization of Nonlinear Processes with an Enhanced
- Scatter Search Method', Jose A. Egea, Eva Balsa-Canto,
- María-Sonia G. García, and Julio R. Banga, Ind. Eng. Chem. Res.
- 2009, 48, 9, 4388–4401. https://doi.org/10.1021/ie801717t
-
-.. [EgeaMar2010] 'An evolutionary method for complex-process optimization',
- Jose A. Egea, Rafael Martí, Julio R. Banga, Computers & Operations Research,
- 2010, 37, 2, 315-324. https://doi.org/10.1016/j.cor.2009.05.003
-
-.. [VillaverdeEge2012] 'A cooperative strategy for parameter estimation in
- large scale systems biology models', Villaverde, A.F., Egea, J.A. & Banga,
- J.R. BMC Syst Biol 2012, 6, 75. https://doi.org/10.1186/1752-0509-6-75
-
-.. [PenasGon2017] 'Parameter estimation in large-scale systems biology models:
- a parallel and self-adaptive cooperative strategy', David R. Penas,
- Patricia González, Jose A. Egea, Ramón Doallo and Julio R. Banga,
- BMC Bioinformatics 2017, 18, 52. https://doi.org/10.1186/s12859-016-1452-4
+See papers on ESS :footcite:p:`EgeaBal2009,EgeaMar2010`,
+CESS :footcite:p:`VillaverdeEge2012`, and saCeSS :footcite:p:`PenasGon2017`.
"""
import enum
import logging
@@ -60,6 +39,11 @@ class ESSExitFlag(int, enum.Enum):
class ESSOptimizer:
"""Enhanced Scatter Search (ESS) global optimization.
+ See papers on ESS :footcite:p:`EgeaBal2009,EgeaMar2010`,
+ CESS :footcite:p:`VillaverdeEge2012`, and saCeSS :footcite:p:`PenasGon2017`.
+
+ .. footbibliography::
+
.. note: Does not implement any constraint handling beyond box constraints
"""
@@ -83,7 +67,7 @@ def __init__(
):
"""Construct new ESS instance.
- For plausible values of hyperparameters, see VillaverdeEge2012.
+ For plausible values of hyperparameters, see :footcite:t:`VillaverdeEge2012`.
Parameters
----------
@@ -104,6 +88,7 @@ def __init__(
In case of a callable, it will be called with the keyword arguments
`max_walltime_s` and `max_eval`, which should be passed to the optimizer
(if supported) to honor the overall budget.
+ See :class:`SacessFidesFactory` for an example.
n_diverse:
Number of samples to choose from to construct the initial RefSet
max_eval:
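A sketch of the callable form of `local_optimizer` described above: the callable receives `max_walltime_s` and `max_eval` and returns an optimizer. The `ScipyOptimizer` usage is an illustrative assumption; `SacessFidesFactory` (further below) is the packaged equivalent for Fides:

```python
import pypesto.optimize


def local_optimizer_factory(max_walltime_s, max_eval):
    # Forward the remaining budget where the local optimizer supports it.
    return pypesto.optimize.ScipyOptimizer(
        method="L-BFGS-B", options={"maxfun": int(max_eval)}
    )


ess = pypesto.optimize.ESSOptimizer(local_optimizer=local_optimizer_factory)
```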
diff --git a/pypesto/optimize/ess/sacess.py b/pypesto/optimize/ess/sacess.py
index cc2e5eee8..3fd95f050 100644
--- a/pypesto/optimize/ess/sacess.py
+++ b/pypesto/optimize/ess/sacess.py
@@ -9,7 +9,7 @@
from multiprocessing import Manager, Process
from multiprocessing.managers import SyncManager
from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from uuid import uuid1
from warnings import warn
@@ -25,7 +25,11 @@
from .function_evaluator import create_function_evaluator
from .refset import RefSet
-__all__ = ["SacessOptimizer", "get_default_ess_options"]
+__all__ = [
+ "SacessOptimizer",
+ "get_default_ess_options",
+ "SacessFidesFactory",
+]
logger = logging.getLogger(__name__)
@@ -34,15 +38,17 @@ class SacessOptimizer:
"""SACESS optimizer.
A shared-memory-based implementation of the SaCeSS algorithm presented in
- [PenasGon2017]_. Multiple processes (`workers`) run ESSs in parallel.
+ :footcite:t:`PenasGon2017`. Multiple processes (`workers`) run
+    :class:`enhanced scatter searches (ESSs) <ESSOptimizer>` in parallel.
After each ESS iteration, depending on the outcome, there is a chance
of exchanging good parameters, and changing ESS hyperparameters to those of
- the most promising worker.
+ the most promising worker. See :footcite:t:`PenasGon2017` for details.
+
+ :class:`SacessOptimizer` can be used with or without a local optimizer, but
+ it is highly recommended to use one.
+
+ .. footbibliography::
- .. [PenasGon2017] 'Parameter estimation in large-scale systems biology models:
- a parallel and self-adaptive cooperative strategy', David R. Penas,
- Patricia González, Jose A. Egea, Ramón Doallo and Julio R. Banga,
- BMC Bioinformatics 2017, 18, 52. https://doi.org/10.1186/s12859-016-1452-4
"""
def __init__(
@@ -67,14 +73,21 @@ def __init__(
Resource limits such as ``max_eval`` apply to a single CESS
iteration, not to the full search.
Mutually exclusive with ``num_workers``.
+ Recommended default settings can be obtained from
+ :func:`get_default_ess_options`.
num_workers:
Number of workers to be used. If this argument is given,
(different) default ESS settings will be used for each worker.
Mutually exclusive with ``ess_init_args``.
+ See :func:`get_default_ess_options` for details on the default
+ settings.
max_walltime_s:
Maximum walltime in seconds. Will only be checked between local
optimizations and other simulations, and thus, may be exceeded by
the duration of a local search. Defaults to no limit.
+ Note that in order to impose the wall time limit also on the local
+ optimizer, the user has to provide a wrapper function similar to
+ :meth:`SacessFidesFactory.__call__`.
ess_loglevel:
Loglevel for ESS runs.
sacess_loglevel:
@@ -117,7 +130,16 @@ def minimize(
problem: Problem,
startpoint_method: StartpointMethod = None,
):
- """Solve the given optimization problem."""
+ """Solve the given optimization problem.
+
+ Parameters
+ ----------
+ problem:
+ Minimization problem.
+ startpoint_method:
+ Method for choosing starting points.
+ **Deprecated. Use ``problem.startpoint_method`` instead.**
+ """
if startpoint_method is not None:
warn(
"Passing `startpoint_method` directly is deprecated, use `problem.startpoint_method` instead.",
@@ -696,11 +718,21 @@ def _run_worker(
return worker.run(problem=problem, startpoint_method=startpoint_method)
-def get_default_ess_options(num_workers: int, dim: int) -> List[Dict]:
+def get_default_ess_options(
+ num_workers: int,
+ dim: int,
+ local_optimizer: Union[
+ bool,
+ "pypesto.optimize.Optimizer",
+ Callable[..., "pypesto.optimize.Optimizer"],
+ ] = True,
+) -> List[Dict]:
"""Get default ESS settings for (SA)CESS.
Returns settings for ``num_workers`` parallel scatter searches, combining
- more aggressive and more conservative configurations.
+ more aggressive and more conservative configurations. Mainly intended for
+ use with :class:`SacessOptimizer`. For details on the different options,
+ see keyword arguments of :meth:`ESSOptimizer.__init__`.
Setting appropriate values for ``n_threads`` and ``local_optimizer`` is
left to the user. Defaults to single-threaded and no local optimizer.
@@ -710,7 +742,10 @@ def get_default_ess_options(num_workers: int, dim: int) -> List[Dict]:
Parameters
----------
num_workers: Number of configurations to return.
- dim: Problem dimension.
+ dim: Problem dimension (number of optimized parameters).
+ local_optimizer: The local optimizer to use
+ (see same argument in :class:`ESSOptimizer`), or a boolean indicating
+ whether to set the default local optimizer (currently :class:`FidesOptimizer`).
"""
min_dimrefset = 5
@@ -866,7 +901,76 @@ def dim_refset(x):
'local_n2': 1,
},
]
+
+ # Set local optimizer
+ for cur_settings in settings:
+ if local_optimizer is True:
+ cur_settings['local_optimizer'] = SacessFidesFactory()
+ elif local_optimizer is not False:
+ cur_settings['local_optimizer'] = local_optimizer
+
return [
settings[0],
*(itertools.islice(itertools.cycle(settings[1:]), num_workers - 1)),
]
+
+
+class SacessFidesFactory:
+ """Factory for :class:`FidesOptimizer` instances for use with :class:`SacessOptimizer`.
+
+ :meth:`__call__` will forward the walltime limit and function evaluation
+ limit imposed on :class:`SacessOptimizer` to :class:`FidesOptimizer`.
+ Besides that, default options are used.
+
+ Parameters
+ ----------
+ fides_options:
+ Options for the :class:`FidesOptimizer`.
+ See :class:`fides.constants.Options`.
+ fides_kwargs:
+ Keyword arguments for the :class:`FidesOptimizer`. See
+ :meth:`FidesOptimizer.__init__`. Must not include ``options``.
+
+ """
+
+ def __init__(
+ self,
+ fides_options: Optional[dict[str, Any]] = None,
+ fides_kwargs: Optional[dict[str, Any]] = None,
+ ):
+ if fides_options is None:
+ fides_options = {}
+ if fides_kwargs is None:
+ fides_kwargs = {}
+
+ self._fides_options = fides_options
+ self._fides_kwargs = fides_kwargs
+
+ # Check if fides is installed
+ try:
+ import fides # noqa F401
+ except ImportError:
+ from ..optimizer import OptimizerImportError
+
+ raise OptimizerImportError("fides")
+
+ def __call__(
+ self, max_walltime_s: int, max_eval: int
+ ) -> "pypesto.optimize.FidesOptimizer":
+ """Create a :class:`FidesOptimizer` instance."""
+
+ from fides.constants import Options as FidesOptions
+
+ options = self._fides_options.copy()
+ options[FidesOptions.MAXTIME] = max_walltime_s
+
+ # only accepts int
+ if np.isfinite(max_eval):
+ options[FidesOptions.MAXITER] = int(max_eval)
+ return pypesto.optimize.FidesOptimizer(
+ **self._fides_kwargs, options=options
+ )
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}(fides_options={self._fides_options}, fides_kwargs={self._fides_kwargs})"
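Taken together, a hedged usage sketch for the pieces above, assuming an existing :class:`pypesto.Problem` instance ``problem``::

    from pypesto.optimize.ess import (
        SacessFidesFactory,
        SacessOptimizer,
        get_default_ess_options,
    )

    # per-worker ESS settings, each using the fides-based local optimizer
    ess_init_args = get_default_ess_options(
        num_workers=4, dim=problem.dim, local_optimizer=SacessFidesFactory()
    )
    sacess = SacessOptimizer(ess_init_args=ess_init_args, max_walltime_s=600)
    result = sacess.minimize(problem)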
diff --git a/pypesto/optimize/optimize.py b/pypesto/optimize/optimize.py
index eb0a74e14..af1e2ad8a 100644
--- a/pypesto/optimize/optimize.py
+++ b/pypesto/optimize/optimize.py
@@ -43,22 +43,22 @@ def minimize(
problem:
The problem to be solved.
optimizer:
- The optimizer to be used n_starts times.
+ The optimizer to be used `n_starts` times.
n_starts:
Number of starts of the optimizer.
ids:
Ids assigned to the startpoints.
startpoint_method:
- Method for how to choose start points. False means the optimizer does
- not require start points, e.g. for the 'PyswarmOptimizer'.
+ Method for how to choose start points. ``False`` means the optimizer does
+ not require start points, e.g. for the :class:`pypesto.optimize.PyswarmOptimizer`.
**Deprecated. Use ``problem.startpoint_method`` instead.**
result:
A result object to append the optimization results to. For example,
one might append more runs to a previous optimization. If None,
a new object is created.
engine:
- Parallelization engine. Defaults to sequential execution on a
- SingleCoreEngine.
+ Parallelization engine. Defaults to sequential execution using
+ :class:`pypesto.engine.SingleCoreEngine`.
progress_bar:
Whether to display a progress bar.
options:
@@ -68,18 +68,17 @@ def minimize(
filename:
Name of the hdf5 file, where the result will be saved. Default is
None, which deactivates automatic saving. If set to
- "Auto" it will automatically generate a file named
- `year_month_day_profiling_result.hdf5`.
- Optionally a method, see docs for `pypesto.store.auto.autosave`.
+ ``Auto`` it will automatically generate a file named
+ ``year_month_day_profiling_result.hdf5``.
+ Optionally a method, see docs for :func:`pypesto.store.auto.autosave`.
overwrite:
Whether to overwrite `result/optimization` in the autosave file
if it already exists.
Returns
-------
- result:
- Result object containing the results of all multistarts in
- `result.optimize_result`.
+ Result object containing the results of all multistarts in
+ `result.optimize_result`.
"""
# optimizer
if optimizer is None:
diff --git a/pypesto/optimize/optimizer.py b/pypesto/optimize/optimizer.py
index baa3d8e36..e3f3ae54b 100644
--- a/pypesto/optimize/optimizer.py
+++ b/pypesto/optimize/optimizer.py
@@ -253,10 +253,12 @@ class ScipyOptimizer(Optimizer):
"""
Use the SciPy optimizers.
- Find details on the optimizer and configuration options at:
- https://docs.scipy.org/doc/scipy/reference/generated/scipy.\
- optimize.minimize.html#scipy.optimize.minimize
- """
+ Find details on the optimizer and configuration options at: :func:`scipy.optimize.minimize`.
+
+ .. note::
+ Least-squares optimizers may face errors in case of objective
+ functions that are not continuously differentiable (e.g. with Laplace priors).
+ """ # noqa
def __init__(
self,
@@ -471,10 +473,13 @@ def is_least_squares(self):
def get_default_options(self):
"""Create default options specific for the optimizer."""
+ options = {'disp': False}
if self.is_least_squares():
- options = {'max_nfev': 1000, 'disp': False}
- else:
- options = {'maxfun': 1000, 'disp': False}
+ options['max_nfev'] = 1000
+ elif self.method.lower() in ('l-bfgs-b', 'tnc'):
+ options['maxfun'] = 1000
+ elif self.method.lower() in ('nelder-mead', 'powell'):
+ options['maxfev'] = 1000
return options
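The per-method defaults above are needed because SciPy does not use a single option name for the evaluation budget. A standalone illustration (not part of the patch)::

    from scipy.optimize import minimize, rosen

    x0 = [1.3, 0.7]
    # each method expects its own key for the evaluation limit:
    minimize(rosen, x0, method='L-BFGS-B', options={'maxfun': 1000})
    minimize(rosen, x0, method='TNC', options={'maxfun': 1000})
    minimize(rosen, x0, method='Nelder-Mead', options={'maxfev': 1000})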
@@ -764,22 +769,20 @@ class ScipyDifferentialEvolutionOptimizer(Optimizer):
"""
Global optimization using scipy's differential evolution optimizer.
- Package homepage: https://docs.scipy.org/doc/scipy/reference/generated\
- /scipy.optimize.differential_evolution.html
+ See: :func:`scipy.optimize.differential_evolution`.
Parameters
----------
options:
Optimizer options that are directly passed on to scipy's optimizer.
-
Examples
--------
Arguments that can be passed to options:
maxiter:
- used to calculate the maximal number of funcion evaluations by
- maxfevals = (maxiter + 1) * popsize * len(x)
+ used to calculate the maximal number of function evaluations by
+ ``maxfevals = (maxiter + 1) * popsize * len(x)``
Default: 100
popsize:
population size, default value 15
@@ -810,7 +813,10 @@ def minimize(
history_options: HistoryOptions = None,
optimize_options: OptimizeOptions = None,
) -> OptimizerResult:
- """Perform optimization. Parameters: see `Optimizer` documentation."""
+ """Perform optimization.
+
+ See :meth:`Optimizer.minimize`.
+ """
bounds = list(zip(problem.lb, problem.ub))
result = scipy.optimize.differential_evolution(
diff --git a/pypesto/petab/importer.py b/pypesto/petab/importer.py
index ce4337794..338631efa 100644
--- a/pypesto/petab/importer.py
+++ b/pypesto/petab/importer.py
@@ -70,12 +70,12 @@ class PetabImporter(AmiciObjectBuilder):
"""
Importer for PEtab files.
- Create an `amici.Model`, an `objective.AmiciObjective` or a
- `pypesto.Problem` from PEtab files. The created objective function is a
+ Create an :class:`amici.amici.Model`, an :class:`pypesto.objective.AmiciObjective` or a
+ :class:`pypesto.problem.Problem` from PEtab files. The created objective function is a
negative log-likelihood function and can thus be negative. The actual
form of the likelihood depends on the noise model specified in the provided PEtab problem.
- For more information, see
- [the PEtab documentation](https://petab.readthedocs.io/en/latest/documentation_data_format.html#noise-distributions)
+ For more information, see the
+ `PEtab documentation <https://petab.readthedocs.io/en/latest/documentation_data_format.html#noise-distributions>`_.
""" # noqa
MODEL_BASE_DIR = "amici_models"
@@ -354,8 +354,8 @@ def compile_model(self, **kwargs):
Parameters
----------
kwargs:
- Extra arguments passed to :meth:`amici.SbmlImporter.sbml2amici`
- or :meth:`amici.pysb_import.pysb2amici`.
+ Extra arguments passed to :meth:`amici.sbml_import.SbmlImporter.sbml2amici`
+ or :func:`amici.pysb_import.pysb2amici`.
"""
# delete output directory
if os.path.exists(self.output_folder):
@@ -380,7 +380,7 @@ def create_solver(self, model: amici.Model = None) -> amici.Solver:
def create_edatas(
self, model: amici.Model = None, simulation_conditions=None
) -> List[amici.ExpData]:
- """Create list of amici.ExpData objects."""
+ """Create list of :class:`amici.amici.ExpData` objects."""
# create model
if model is None:
model = self.create_model()
@@ -399,7 +399,7 @@ def create_objective(
force_compile: bool = False,
**kwargs,
) -> AmiciObjective:
- """Create a :class:`pypesto.AmiciObjective`.
+ """Create a :class:`pypesto.objective.AmiciObjective`.
Parameters
----------
@@ -419,8 +419,7 @@ def create_objective(
Returns
-------
- objective:
- A :class:`pypesto.AmiciObjective` for the model and the data.
+ A :class:`pypesto.objective.AmiciObjective` for the model and the data.
"""
# get simulation conditions
simulation_conditions = petab.get_simulation_conditions(
@@ -568,10 +567,8 @@ def create_predictor(
Returns
-------
- predictor:
- A :class:`pypesto.predict.AmiciPredictor` for the model, using
- the outputs of the AMICI model and the timepoints from the
- PEtab data
+ A :class:`pypesto.predict.AmiciPredictor` for the model, using
+ the outputs of the AMICI model and the timepoints from the PEtab data.
"""
# if the user didn't pass an objective function, we create it first
if objective is None:
@@ -668,18 +665,18 @@ def create_problem(
startpoint_kwargs: Dict[str, Any] = None,
**kwargs,
) -> Problem:
- """Create a :class:`pypesto.Problem`.
+ """Create a :class:`pypesto.problem.Problem`.
Parameters
----------
objective:
- Objective as created by `create_objective`.
+ Objective as created by :meth:`create_objective`.
x_guesses:
Guesses for the parameter values, shape (g, dim), where g denotes
the number of guesses. These are used as start points in the
optimization.
problem_kwargs:
- Passed to the `pypesto.Problem` constructor.
+ Passed to :meth:`pypesto.problem.Problem.__init__`.
startpoint_kwargs:
Keyword arguments forwarded to
:meth:`PetabImporter.create_startpoint_method`.
@@ -689,8 +686,7 @@ def create_problem(
Returns
-------
- problem:
- A :class:`pypesto.Problem` for the objective.
+ A :class:`pypesto.problem.Problem` for the objective.
"""
if objective is None:
objective = self.create_objective(**kwargs)
@@ -779,15 +775,14 @@ def rdatas_to_measurement_df(
----------
rdatas:
A list of rdatas as produced by
- pypesto.AmiciObjective.__call__(x, return_dict=True)['rdatas'].
+ ``pypesto.AmiciObjective.__call__(x, return_dict=True)['rdatas']``.
model:
The amici model.
Returns
-------
- measurement_df:
- A dataframe built from the rdatas in the format as in
- self.petab_problem.measurement_df.
+ A dataframe built from the rdatas in the format as in
+ ``self.petab_problem.measurement_df``.
"""
# create model
if model is None:
@@ -805,9 +800,9 @@ def rdatas_to_simulation_df(
model: amici.Model = None,
) -> pd.DataFrame:
"""
- See `rdatas_to_measurement_df`.
+ See :meth:`rdatas_to_measurement_df`.
- Execpt a petab simulation dataframe is created, i.e. the measurement
+ Except a petab simulation dataframe is created, i.e. the measurement
column label is adjusted.
"""
return self.rdatas_to_measurement_df(rdatas, model).rename(
@@ -828,15 +823,14 @@ def prediction_to_petab_measurement_df(
Parameters
----------
prediction:
- A prediction result as produced by an AmiciPredictor
+ A prediction result as produced by an :class:`pypesto.predict.AmiciPredictor`.
predictor:
- The AmiciPredictor function
+ The :class:`pypesto.predict.AmiciPredictor` instance.
Returns
-------
- measurement_df:
- A dataframe built from the rdatas in the format as in
- self.petab_problem.measurement_df.
+ A dataframe built from the rdatas in the format as in
+ ``self.petab_problem.measurement_df``.
"""
# create rdata-like dicts from the prediction result
@@ -863,7 +857,7 @@ def prediction_to_petab_simulation_df(
predictor: AmiciPredictor = None,
) -> pd.DataFrame:
"""
- See `prediction_to_petab_measurement_df`.
+ See :meth:`prediction_to_petab_measurement_df`.
Except a PEtab simulation dataframe is created, i.e. the measurement
column label is adjusted.
@@ -883,7 +877,7 @@ def _find_output_folder_name(
If available, use the model name from the ``petab_problem`` or the
provided ``model_name`` (latter is given priority), otherwise create a
unique name. The folder will be located in the
- ``PetabImporter.MODEL_BASE_DIR`` subdirectory of the current directory.
+ :obj:`PetabImporter.MODEL_BASE_DIR` subdirectory of the current directory.
"""
# check whether location for amici model is a file
if os.path.exists(PetabImporter.MODEL_BASE_DIR) and not os.path.isdir(
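For orientation, the usual path through :class:`PetabImporter` is short; a sketch, assuming a PEtab problem stored at the hypothetical path ``model.yaml``::

    import pypesto.petab

    importer = pypesto.petab.PetabImporter.from_yaml("model.yaml")
    objective = importer.create_objective()
    problem = importer.create_problem(objective)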
diff --git a/pypesto/profile/approximate.py b/pypesto/profile/approximate.py
index ccddd99d5..34363691d 100644
--- a/pypesto/profile/approximate.py
+++ b/pypesto/profile/approximate.py
@@ -50,8 +50,7 @@ def approximate_parameter_profile(
Returns
-------
- result:
- The profile results are filled into `result.profile_result`.
+ The profile results are filled into `result.profile_result`.
"""
# Handling defaults
# profiling indices
diff --git a/pypesto/profile/profile.py b/pypesto/profile/profile.py
index 116ef21d8..2d0fcceb6 100644
--- a/pypesto/profile/profile.py
+++ b/pypesto/profile/profile.py
@@ -30,7 +30,7 @@ def parameter_profile(
overwrite: bool = False,
) -> Result:
"""
- Call to do parameter profiling.
+ Compute parameter profiles.
Parameters
----------
@@ -45,6 +45,7 @@ def parameter_profile(
The optimizer to be used along each profile.
engine:
The engine to be used.
+ Defaults to :class:`pypesto.engine.SingleCoreEngine`.
profile_index:
List with the parameter indices to be profiled
(by default all free indices).
@@ -61,12 +62,13 @@ def parameter_profile(
:func:`pypesto.profile.profile_next_guess.next_guess`.
profile_options:
Various options applied to the profile optimization.
+ See :class:`pypesto.profile.options.ProfileOptions`.
progress_bar:
Whether to display a progress bar.
filename:
Name of the hdf5 file, where the result will be saved. Default is
None, which deactivates automatic saving. If set to
- "Auto" it will automatically generate a file named
+ ``Auto`` it will automatically generate a file named
``year_month_day_profiling_result.hdf5``.
Optionally a method, see docs for :func:`pypesto.store.auto.autosave`.
overwrite:
@@ -75,8 +77,7 @@ def parameter_profile(
Returns
-------
- result:
- The profile results are filled into `result.profile_result`.
+ The profile results are filled into `result.profile_result`.
"""
# Copy the problem to avoid side effects
problem = copy.deepcopy(problem)
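A minimal call sequence for :func:`parameter_profile` as documented above; a sketch, assuming ``problem`` already exists::

    import pypesto.optimize
    import pypesto.profile

    result = pypesto.optimize.minimize(problem=problem, n_starts=10)
    result = pypesto.profile.parameter_profile(
        problem=problem,
        result=result,
        optimizer=pypesto.optimize.ScipyOptimizer(),
    )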
diff --git a/pypesto/profile/profile_next_guess.py b/pypesto/profile/profile_next_guess.py
index 7a2cf5364..0af3df4fc 100644
--- a/pypesto/profile/profile_next_guess.py
+++ b/pypesto/profile/profile_next_guess.py
@@ -1,5 +1,4 @@
-import copy
-from typing import Callable, List, Literal, Tuple, Union
+from typing import Callable, Literal
import numpy as np
@@ -42,10 +41,12 @@ def next_guess(
profile_options:
Various options applied to the profile optimization.
update_type:
- Type of update for next profile point:
- ``fixed_step`` (see :func:`fixed_step`),
- ``adaptive_step_order_0``, ``adaptive_step_order_1``, or ``adaptive_step_regression``
- (see :func:`adaptive_step`).
+ Type of update for next profile point. Available options are:
+
+ * ``fixed_step`` (see :func:`fixed_step`)
+ * ``adaptive_step_order_0`` (see :func:`adaptive_step`)
+ * ``adaptive_step_order_1`` (see :func:`adaptive_step`)
+ * ``adaptive_step_regression`` (see :func:`adaptive_step`)
current_profile:
The profile which should be computed.
problem:
@@ -55,8 +56,7 @@ def next_guess(
Returns
-------
- next_guess:
- The next initial guess as base for the next profile point.
+ The next initial guess as base for the next profile point.
"""
if update_type == 'fixed_step':
return fixed_step(
@@ -96,16 +96,16 @@ def fixed_step(
"""Most simple method to create the next guess.
Computes the next point based on the fixed step size given by
- ``default_step_size`` in :class:`ProfileOptions`.
+ :attr:`pypesto.profile.ProfileOptions.default_step_size`.
Parameters
----------
x:
The current position of the profiler, size `dim_full`.
par_index:
- The index of the parameter of the current profile
+ The index of the parameter of the current profile.
par_direction:
- The direction, in which the profiling is done (``1`` or ``-1``)
+ The direction, in which the profiling is done (``1`` or ``-1``).
options:
Various options applied to the profile optimization.
problem:
@@ -113,8 +113,7 @@ def fixed_step(
Returns
-------
- x_new:
- The updated parameter vector, of size `dim_full`.
+ The updated parameter vector, of size `dim_full`.
"""
delta_x = np.zeros(len(x))
delta_x[par_index] = par_direction * options.default_step_size
@@ -150,41 +149,42 @@ def adaptive_step(
x:
The current position of the profiler, size `dim_full`.
par_index:
- The index of the parameter of the current profile
+ The index of the parameter of the current profile.
par_direction:
- The direction, in which the profiling is done (1 or -1)
+ The direction, in which the profiling is done (``1`` or ``-1``).
options:
Various options applied to the profile optimization.
current_profile:
- The profile which should be computed
+ The profile which should be computed.
problem:
The problem to be solved.
global_opt:
- log-posterior value of the global optimum
+ Log-posterior value of the global optimum.
order:
- Specifies the precise algorithm for extrapolation: can be ``0`` (
- just one parameter is updated), ``1`` (last two points used to
- extrapolate all parameters), and ``np.nan`` (indicates that a more
- complex regression should be used)
+ Specifies the precise algorithm for extrapolation.
+ Available options are:
+
+ * ``0``: just one parameter is updated
+ * ``1``: the last two points are used to extrapolate all parameters
+ * ``np.nan``: indicates that a more complex regression should be used
+ as determined by :attr:`pypesto.profile.ProfileOptions.reg_order`.
+
Returns
-------
- x_new:
- The updated parameter vector, of size `dim_full`.
+ The updated parameter vector, of size `dim_full`.
"""
# restrict step proposal to minimum and maximum step size
def clip_to_minmax(step_size_proposal):
- return clip(
+ return np.clip(
step_size_proposal, options.min_step_size, options.max_step_size
)
# restrict step proposal to bounds
def clip_to_bounds(step_proposal):
- return clip(step_proposal, problem.lb_full, problem.ub_full)
+ return np.clip(step_proposal, problem.lb_full, problem.ub_full)
- # check if this is the first step
- n_profile_points = len(current_profile.fval_path)
problem.fix_parameters(par_index, x[par_index])
# Get update directions and first step size guesses
@@ -197,7 +197,6 @@ def clip_to_bounds(step_proposal):
x,
par_index,
par_direction,
- n_profile_points,
global_opt,
order,
current_profile,
@@ -208,43 +207,47 @@ def clip_to_bounds(step_proposal):
# check whether we must make a minimum step anyway, since we're close to
# the next bound
min_delta_x = x[par_index] + par_direction * options.min_step_size
+
if par_direction == -1 and (min_delta_x < problem.lb_full[par_index]):
step_length = problem.lb_full[par_index] - x[par_index]
return x + step_length * delta_x_dir
- elif par_direction == 1 and (min_delta_x > problem.ub_full[par_index]):
+
+ if par_direction == 1 and (min_delta_x > problem.ub_full[par_index]):
step_length = problem.ub_full[par_index] - x[par_index]
return x + step_length * delta_x_dir
# parameter extrapolation function
- def par_extrapol(step_length):
- # Do we have enough points to do a regression?
- if np.isnan(order) and n_profile_points > 2:
- x_step_tmp = []
+ n_profile_points = len(current_profile.fval_path)
+
+ # Do we have enough points to do a regression?
+ if np.isnan(order) and n_profile_points > 2:
+
+ def par_extrapol(step_length):
+ x_step = []
# loop over parameters, extrapolate each one
for i_par in range(problem.dim_full):
if i_par == par_index:
# if we meet the profiling parameter, just increase,
# don't extrapolate
- x_step_tmp.append(
- x[par_index] + step_length * par_direction
- )
+ x_step.append(x[par_index] + step_length * par_direction)
elif i_par in problem.x_fixed_indices:
# common fixed parameter: will be ignored anyway later
- x_step_tmp.append(np.nan)
+ x_step.append(np.nan)
else:
# extrapolate
cur_par_extrapol = np.poly1d(reg_par[i_par])
- x_step_tmp.append(
+ x_step.append(
cur_par_extrapol(
x[par_index] + step_length * par_direction
)
)
- x_step = np.array(x_step_tmp)
- else:
- # if we do simple extrapolation
- x_step = x + step_length * delta_x_dir
+ return clip_to_bounds(x_step)
- return clip_to_bounds(x_step)
+ else:
+ # if not, we do simple extrapolation
+ def par_extrapol(step_length):
+ x_step = x + step_length * delta_x_dir
+ return clip_to_bounds(x_step)
# compute proposal
next_x = par_extrapol(step_size_guess)
@@ -265,7 +268,6 @@ def par_extrapol(step_length):
return do_line_search(
next_x,
step_size_guess,
- "decrease" if next_obj_target < next_obj else "increase",
par_extrapol,
next_obj,
next_obj_target,
@@ -280,19 +282,31 @@ def par_extrapol(step_length):
def handle_profile_history(
x: np.ndarray,
par_index: int,
- par_direction: int,
- n_profile_points: int,
+ par_direction: Literal[1, -1],
global_opt: float,
order: int,
current_profile: ProfilerResult,
problem: Problem,
options: ProfileOptions,
-) -> Tuple:
+) -> tuple[float, np.ndarray, list[float], float]:
"""Compute the very first step direction update guesses.
Check whether enough steps have been taken for applying regression,
computes regression or simple extrapolation.
+
+ Returns
+ -------
+ step_size_guess:
+ Guess for the step size.
+ delta_x_dir:
+ Parameter update direction.
+ reg_par:
+ The regression polynomial for profile extrapolation.
+ delta_obj_value:
+ The difference of the objective function value between the last point and `global_opt`.
"""
+ n_profile_points = len(current_profile.fval_path)
+
# set the update direction
delta_x_dir = np.zeros(len(x))
delta_x_dir[par_index] = par_direction
@@ -320,29 +334,28 @@ def handle_profile_history(
delta_x_dir = last_delta_x / step_size_guess
elif np.isnan(order):
# compute the regression polynomial for parameter extrapolation
-
reg_par = get_reg_polynomial(
- n_profile_points, par_index, current_profile, problem, options
+ par_index, current_profile, problem, options
)
return step_size_guess, delta_x_dir, reg_par, delta_obj_value
def get_reg_polynomial(
- n_profile_points: int,
par_index: int,
current_profile: ProfilerResult,
problem: Problem,
options: ProfileOptions,
-) -> List[float]:
+) -> list[float]:
"""Compute the regression polynomial.
- Used to step proposal extrapolation from the last profile points
+ Used to extrapolate the step proposal from the last profile points.
"""
# determine interpolation order
+ n_profile_points = len(current_profile.fval_path)
reg_max_order = np.floor(n_profile_points / 2)
- reg_order = np.min([reg_max_order, options.reg_order])
- reg_points = np.min([n_profile_points, options.reg_points])
+ reg_order = min(reg_max_order, options.reg_order)
+ reg_points = min(n_profile_points, options.reg_points)
# set up matrix of regression parameters
reg_par = []
@@ -355,8 +368,8 @@ def get_reg_polynomial(
# Do polynomial interpolation of profile path
# Determine rank of polynomial interpolation
regression_tmp = np.polyfit(
- current_profile.x_path[par_index, -1:-reg_points:-1],
- current_profile.x_path[i_par, -1:-reg_points:-1],
+ current_profile.x_path[par_index, -reg_points:],
+ current_profile.x_path[i_par, -reg_points:],
reg_order,
full=True,
)
@@ -365,8 +378,8 @@ def get_reg_polynomial(
if regression_tmp[2] < reg_order:
reg_order = regression_tmp[2]
regression_tmp = np.polyfit(
- current_profile.x_path[par_index, -reg_points:-1],
- current_profile.x_path[i_par, -reg_points:-1],
+ current_profile.x_path[par_index, -reg_points:],
+ current_profile.x_path[i_par, -reg_points:],
int(reg_order),
full=True,
)
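The slicing change above is the indexing fix referenced in the changelog (#1226): the old negative-stride slices reversed the path and dropped points. A small numpy check makes the difference visible::

    import numpy as np

    x = np.arange(6)      # stand-in for a profile path
    reg_points = 4
    x[-1:-reg_points:-1]  # array([5, 4, 3]) -- reversed, one point short
    x[-reg_points:-1]     # array([2, 3, 4]) -- drops the newest point
    x[-reg_points:]       # array([2, 3, 4, 5]) -- the intended last 4 points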
@@ -380,7 +393,6 @@ def get_reg_polynomial(
def do_line_search(
next_x: np.ndarray,
step_size_guess: float,
- direction: Literal['increase', 'decrease'],
par_extrapol: Callable,
next_obj: float,
next_obj_target: float,
@@ -394,36 +406,64 @@ def do_line_search(
Based on the objective function we want to reach, based on the current
position in parameter space and on the first guess for the proposal.
+
+ Parameters
+ ----------
+ next_x:
+ Starting parameters for the line search.
+ step_size_guess:
+ First guess for the step size.
+ par_extrapol:
+ Parameter extrapolation function.
+ next_obj:
+ Objective function value at `next_x`.
+ next_obj_target:
+ Objective function value we want to reach.
+ clip_to_minmax:
+ Function to clip the step size to minimum and maximum step size.
+ clip_to_bounds:
+ Function to clip the parameters to the bounds.
+ par_index:
+ Index of the parameter we are profiling.
+ problem:
+ The parameter estimation problem.
+ options:
+ Profile likelihood options.
+
+ Returns
+ -------
+ Parameter vector that is expected to yield the objective function value
+ closest to `next_obj_target`.
"""
# Was the initial step too big or too small?
+ direction = "decrease" if next_obj_target < next_obj else "increase"
if direction == 'increase':
adapt_factor = options.step_size_factor
else:
adapt_factor = 1 / options.step_size_factor
# Loop until correct step size was found
- stop_search = False
- while not stop_search:
+ while True:
# Adapt step size of guess
- last_x = copy.copy(next_x)
+ last_x = next_x
step_size_guess = clip_to_minmax(step_size_guess * adapt_factor)
next_x = clip_to_bounds(par_extrapol(step_size_guess))
# Check if we hit the bounds
- hit_bounds = (
+ if (
direction == 'decrease'
and step_size_guess == options.min_step_size
- ) or (
+ ):
+ return next_x
+ if (
direction == 'increase'
and step_size_guess == options.max_step_size
- )
-
- if hit_bounds:
+ ):
return next_x
# compute new objective value
problem.fix_parameters(par_index, next_x[par_index])
- last_obj = copy.copy(next_obj)
+ last_obj = next_obj
next_obj = problem.objective(problem.get_reduced_vector(next_x))
# check for root crossing and compute correct step size in case
@@ -448,22 +488,3 @@ def next_x_interpolate(
# fix final guess and return
return last_x + add_x
-
-
-def clip(
- vector_guess: Union[float, np.ndarray],
- lower: Union[float, np.ndarray],
- upper: Union[float, np.ndarray],
-) -> Union[float, np.ndarray]:
- """Restrict a scalar or a vector to given bounds.
-
- ``vector_guess`` is modified in-place if it is an array.
- """
- if isinstance(vector_guess, float):
- return np.max([np.min([vector_guess, upper]), lower])
-
- for i_par, i_guess in enumerate(vector_guess):
- vector_guess[i_par] = np.max(
- [np.min([i_guess, upper[i_par]]), lower[i_par]]
- )
- return vector_guess
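:func:`numpy.clip` covers both the scalar and the vector case of the removed helper; unlike the old in-place variant, it returns a new array. For illustration::

    import numpy as np

    np.clip(0.7, 0.0, 0.5)                         # 0.5 (scalar case)
    np.clip(np.array([-1.0, 0.2, 3.0]), 0.0, 1.0)  # array([0. , 0.2, 1. ])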
diff --git a/pypesto/profile/task.py b/pypesto/profile/task.py
index 4c4ea08f0..0cdbfaddc 100644
--- a/pypesto/profile/task.py
+++ b/pypesto/profile/task.py
@@ -1,5 +1,5 @@
import logging
-from typing import Callable
+from typing import Any, Callable
import pypesto.optimize
@@ -55,7 +55,7 @@ def __init__(
self.i_par = i_par
self.options = options
- def execute(self) -> 'pypesto.profile.ProfilerResult':
+ def execute(self) -> dict[str, Any]:
"""Compute profile in descending and ascending direction."""
logger.debug(f"Executing task {self.i_par}.")
diff --git a/pypesto/profile/util.py b/pypesto/profile/util.py
index 350d64459..ceac7ffde 100644
--- a/pypesto/profile/util.py
+++ b/pypesto/profile/util.py
@@ -1,5 +1,5 @@
"""Utility function for profile module."""
-from typing import Any, Dict, Iterable, Tuple
+from typing import Any, Iterable
import numpy as np
import scipy.stats
@@ -25,8 +25,7 @@ def chi2_quantile_to_ratio(alpha: float = 0.95, df: int = 1):
Returns
-------
- ratio:
- Corresponds to a likelihood ratio.
+ The computed likelihood ratio threshold.
"""
quantile = scipy.stats.chi2.ppf(alpha, df=df)
ratio = np.exp(-quantile / 2)
@@ -35,11 +34,11 @@ def chi2_quantile_to_ratio(alpha: float = 0.95, df: int = 1):
def calculate_approximate_ci(
xs: np.ndarray, ratios: np.ndarray, confidence_ratio: float
-) -> Tuple[float, float]:
+) -> tuple[float, float]:
"""
Calculate approximate confidence interval based on profile.
- Interval bounds are linerly interpolated.
+ Interval bounds are linearly interpolated.
Parameters
----------
@@ -50,12 +49,11 @@ def calculate_approximate_ci(
The likelihood ratios corresponding to the parameter values.
confidence_ratio:
Minimum confidence ratio to base the confidence interval upon, as
- obtained via `pypesto.profile.chi2_quantile_to_ratio`.
+ obtained via :func:`pypesto.profile.chi2_quantile_to_ratio`.
Returns
-------
- lb, ub:
- Bounds of the approximate confidence interval.
+ Bounds of the approximate confidence interval.
"""
# extract indices where the ratio is larger than the minimum ratio
(indices,) = np.where(ratios >= confidence_ratio)
@@ -147,7 +145,7 @@ def initialize_profile(
def fill_profile_list(
profile_result: ProfileResult,
- optimizer_result: Dict[str, Any],
+ optimizer_result: dict[str, Any],
profile_index: Iterable[int],
profile_list: int,
problem_dimension: int,
diff --git a/pypesto/profile/validation_intervals.py b/pypesto/profile/validation_intervals.py
index b77bcbd3d..aff7f6aa3 100644
--- a/pypesto/profile/validation_intervals.py
+++ b/pypesto/profile/validation_intervals.py
@@ -33,8 +33,8 @@ def validation_profile_significance(
The reasoning behind their approach is, that a validation data set
is outside the validation interval, if fitting the full data set
- would lead to a fit $\theta_{new}$, that does not contain the old
- fit $\theta_{train}$ in their (Profile-Likelihood) based
+ would lead to a fit :math:`\theta_{new}`, that does not contain the old
+ fit :math:`\theta_{train}` in their (Profile-Likelihood) based
parameter-confidence intervals. (I.e. the old fit would be rejected by
the fit of the full data.)
@@ -50,34 +50,28 @@ def validation_profile_significance(
problem_full_data:
pypesto.problem, such that the objective is the
negative-log-likelihood of the training and validation data set.
-
result_training_data:
- result object from the fitting of the training data set only.
-
+ Result object from the fitting of the training data set only.
result_full_data
- pypesto.result object that contains the result of fitting
+ Result object that contains the result of fitting
training and validation data combined.
-
n_starts
number of starts for fitting the full data set
- (if result_full_data is not provided).
-
+ (if `result_full_data` is not provided).
optimizer:
- optimizer used for refitting the data (if result_full_data is not
+ optimizer used for refitting the data (if `result_full_data` is not
provided).
-
- engine
- engine for refitting (if result_full_data is not provided).
-
+ engine:
+ engine for refitting (if `result_full_data` is not provided).
lsq_objective:
- indicates if the objective of problem_full_data corresponds to a nllh
- (False), or a chi^2 value (True).
+ indicates if the objective of `problem_full_data` corresponds to a nllh
+ (``False``), or a :math:`\chi^2` value (``True``).
return_significance:
- indicates, if the function should return the significance (True) (i.e.
+ indicates, if the function should return the significance (``True``) (i.e.
the probability, that the new data set lies outside the Confidence
Interval for the validation experiment, as given by the method), or
the largest alpha, such that the validation experiment still lies
- within the Confidence Interval (False). I.e. alpha = 1-significance.
+ within the Confidence Interval (``False``). I.e. :math:`\alpha = 1 - \text{significance}`.
.. [#Kreutz] Kreutz, Clemens, Raue, Andreas and Timmer, Jens.
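A hedged usage sketch for the function documented above, assuming fitted results are already available under the names shown::

    from pypesto.profile import validation_profile_significance

    significance = validation_profile_significance(
        problem_full_data=problem_full_data,
        result_training_data=result_training_data,
    )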
diff --git a/pypesto/profile/walk_along_profile.py b/pypesto/profile/walk_along_profile.py
index 14ae47a3d..202019ea7 100644
--- a/pypesto/profile/walk_along_profile.py
+++ b/pypesto/profile/walk_along_profile.py
@@ -1,5 +1,5 @@
import logging
-from typing import Callable
+from typing import Callable, Literal
import numpy as np
@@ -15,7 +15,7 @@
def walk_along_profile(
current_profile: ProfilerResult,
problem: Problem,
- par_direction: int,
+ par_direction: Literal[1, -1],
optimizer: Optimizer,
options: ProfileOptions,
create_next_guess: Callable,
@@ -54,11 +54,10 @@ def walk_along_profile(
Returns
-------
- current_profile:
- The current profile, modified in-place.
+ The current profile, modified in-place.
"""
- # create variables which are needed during iteration
- stop_profile = False
+ if par_direction not in (-1, 1):
+ raise AssertionError("par_direction must be -1 or 1")
# while loop for profiling (will be exited by break command)
while True:
@@ -67,18 +66,16 @@ def walk_along_profile(
# check if the next profile point needs to be computed
# ... check bounds
- if par_direction == -1:
- stop_profile = x_now[i_par] <= problem.lb_full[[i_par]]
- elif par_direction == 1:
- stop_profile = x_now[i_par] >= problem.ub_full[[i_par]]
- else:
- raise AssertionError("par_direction must be -1 or 1")
+ if par_direction == -1 and x_now[i_par] <= problem.lb_full[[i_par]]:
+ break
+ if par_direction == 1 and x_now[i_par] >= problem.ub_full[[i_par]]:
+ break
# ... check likelihood ratio
- if not options.whole_path:
- stop_profile |= current_profile.ratio_path[-1] < options.ratio_min
-
- if stop_profile:
+ if (
+ not options.whole_path
+ and current_profile.ratio_path[-1] < options.ratio_min
+ ):
break
# compute the new start point for optimization
@@ -92,10 +89,9 @@ def walk_along_profile(
global_opt,
)
- # fix current profiling parameter to current value and set
- # start point
+ # fix current profiling parameter to current value and set start point
problem.fix_parameters(i_par, x_next[i_par])
- startpoint = np.array([x_next[i] for i in problem.x_free_indices])
+ startpoint = x_next[problem.x_free_indices]
# run optimization
if startpoint.size > 0:
@@ -113,12 +109,8 @@ def walk_along_profile(
if np.isfinite(optimizer_result.fval):
break
- profiled_par_id = problem.x_names[i_par]
- profiled_par_value = startpoint[
- problem.x_free_indices.index(i_par)
- ]
logger.warning(
- f"Optimization at {profiled_par_id}={profiled_par_value} failed."
+ f"Optimization at {problem.x_names[i_par]}={x_next[i_par]} failed."
)
# sample a new starting point for another attempt
# might be preferable to stay close to the previous point, at least initially,
diff --git a/pypesto/sample/geweke_test.py b/pypesto/sample/geweke_test.py
index 0cbe89d2a..989f6f281 100644
--- a/pypesto/sample/geweke_test.py
+++ b/pypesto/sample/geweke_test.py
@@ -199,9 +199,7 @@ def burn_in_by_sequential_geweke(
alpha2 = zscore * np.ones((len(idxs)))
for i in range(len(max_z)):
- alpha2[idxs[i]] = alpha2[idxs[i]] / (
- len(fragments) - np.where(idxs == i)[0] + 1
- )
+ alpha2[idxs[i]] /= len(fragments) - np.argwhere(idxs == i).item(0) + 1
if np.any(alpha2 > max_z):
burn_in = (np.where(alpha2 > max_z)[0][0]) * step
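The switch from ``np.where`` to ``np.argwhere(...).item(0)`` turns a length-1 index array into a plain scalar, so the division no longer broadcasts. A quick illustration::

    import numpy as np

    idxs = np.array([2, 0, 1])
    i = 1
    np.where(idxs == i)[0]          # array([2]) -- an array; arithmetic broadcasts
    np.argwhere(idxs == i).item(0)  # 2 -- a plain integer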
diff --git a/pypesto/version.py b/pypesto/version.py
index 6a9beea82..3d26edf77 100644
--- a/pypesto/version.py
+++ b/pypesto/version.py
@@ -1 +1 @@
-__version__ = "0.4.0"
+__version__ = "0.4.1"
diff --git a/pypesto/visualize/optimizer_history.py b/pypesto/visualize/optimizer_history.py
index 534b109f4..55ce47ef1 100644
--- a/pypesto/visualize/optimizer_history.py
+++ b/pypesto/visualize/optimizer_history.py
@@ -295,6 +295,10 @@ def get_trace(
else: # trace_x == TRACE_X_STEPS:
x_vals = np.array(list(range(len(indices))))
+ # if the trace is empty, skip
+ if len(x_vals) == 0:
+ continue
+
# write down values
vals.append(np.vstack([x_vals, y_vals]))
diff --git a/pypesto/visualize/profiles.py b/pypesto/visualize/profiles.py
index de12743a5..f2077ceef 100644
--- a/pypesto/visualize/profiles.py
+++ b/pypesto/visualize/profiles.py
@@ -1,4 +1,4 @@
-from typing import Sequence, Tuple, Union
+from typing import Optional, Sequence, Union
from warnings import warn
import matplotlib.pyplot as plt
@@ -15,7 +15,7 @@ def profiles(
results: Union[Result, Sequence[Result]],
ax=None,
profile_indices: Sequence[int] = None,
- size: Sequence[float] = (18.5, 6.5),
+ size: tuple[float, float] = (18.5, 6.5),
reference: Union[ReferencePoint, Sequence[ReferencePoint]] = None,
colors=None,
legends: Sequence[str] = None,
@@ -23,7 +23,7 @@ def profiles(
profile_list_ids: Union[int, Sequence[int]] = 0,
ratio_min: float = 0.0,
show_bounds: bool = False,
-):
+) -> plt.Axes:
"""
Plot classical 1D profile plot.
@@ -31,26 +31,26 @@ def profiles(
Parameters
----------
- results: list or pypesto.Result
+ results:
List of or single `pypesto.Result` after profiling.
- ax: list of matplotlib.Axes, optional
+ ax:
List of axes objects to use.
- profile_indices: list of integer values
+ profile_indices:
List of integer values specifying which profiles should be plotted.
- size: tuple, optional
+ size:
Figure size (width, height) in inches. Is only applied when no ax
object is specified.
- reference: list, optional
+ reference:
List of reference points for optimization results, containing at
least a function value fval.
- colors: list, or RGBA, optional
+ colors:
List of colors, or single color.
- legends: list or str, optional
+ legends:
Labels for line plots, one label per result object.
- x_labels: list of str
+ x_labels:
Labels for parameter value axes (e.g. parameter names).
- profile_list_ids: int or list of ints, optional
- Index or list of indices of the profile lists to be used for profiling.
+ profile_list_ids:
+ Index or list of indices of the profile lists to visualize.
ratio_min:
Minimum ratio below which to cut off.
show_bounds:
@@ -58,7 +58,7 @@ def profiles(
Returns
-------
- ax: matplotlib.Axes
+ ax:
The plot axes.
"""
# parse input
@@ -122,16 +122,16 @@ def profiles(
def profiles_lowlevel(
- fvals,
- ax=None,
- size: Tuple[float, float] = (18.5, 6.5),
+ fvals: Union[float, Sequence[float]],
+ ax: Optional[Sequence[plt.Axes]] = None,
+ size: tuple[float, float] = (18.5, 6.5),
color=None,
legend_text: str = None,
x_labels=None,
show_bounds: bool = False,
- lb_full=None,
- ub_full=None,
-):
+ lb_full: Sequence[float] = None,
+ ub_full: Sequence[float] = None,
+) -> list[plt.Axes]:
"""
Lowlevel routine for profile plotting.
@@ -139,21 +139,16 @@ def profiles_lowlevel(
Parameters
----------
- fvals: numeric list or array
+ fvals:
Values to plot.
- ax: list of matplotlib.Axes, optional
+ ax:
List of axes object to use.
- size: tuple, optional
- Figure size (width, height) in inches. Is only applied when no ax
- object is specified.
- size: tuple, optional
+ size:
Figure size (width, height) in inches. Is only applied when no ax
object is specified.
color: RGBA, optional
Color for profiles in plot.
- legend_text: str
- Label for line plots.
- legend_text: List[str]
+ legend_text:
Label for line plots.
show_bounds:
Whether to show, and extend the plot to, the lower and upper bounds.
@@ -164,8 +159,7 @@ def profiles_lowlevel(
Returns
-------
- ax: matplotlib.Axes
- The plot axes.
+ The plot axes.
"""
# axes
if ax is None:
@@ -179,7 +173,7 @@ def profiles_lowlevel(
create_new_ax = False
# count number of necessary axes
- if isinstance(fvals, list):
+ if isinstance(fvals, Sequence):
n_fvals = len(fvals)
else:
n_fvals = 1
@@ -269,30 +263,30 @@ def profiles_lowlevel(
def profile_lowlevel(
- fvals,
- ax=None,
- size: Tuple[float, float] = (18.5, 6.5),
+ fvals: Sequence[float],
+ ax: Optional[plt.Axes] = None,
+ size: tuple[float, float] = (18.5, 6.5),
color=None,
legend_text: str = None,
show_bounds: bool = False,
lb: float = None,
ub: float = None,
-):
+) -> plt.Axes:
"""
Lowlevel routine for plotting one profile, working with a numpy array only.
Parameters
----------
- fvals: numeric list or array
+ fvals:
Values to plot.
- ax: matplotlib.Axes, optional
+ ax:
Axes object to use.
- size: tuple, optional
+ size:
Figure size (width, height) in inches. Is only applied when no ax
object is specified.
color: RGBA, optional
Color for profiles in plot.
- legend_text: str
+ legend_text:
Label for line plots.
show_bounds:
Whether to show, and extend the plot to, the lower and upper bounds.
@@ -303,8 +297,7 @@ def profile_lowlevel(
Returns
-------
- ax: matplotlib.Axes
- The plot axes.
+ The plot axes.
"""
# parse input
fvals = np.asarray(fvals)
@@ -372,28 +365,27 @@ def handle_inputs(
profile_indices: Sequence[int],
profile_list: int,
ratio_min: float,
-):
+) -> list[np.ndarray]:
"""
Retrieve the values of the profiles to be plotted.
Parameters
----------
- result: pypesto.Result
+ result:
Profile result obtained by 'profile.py'.
- profile_indices: list of integer values
- List of integer values specifying which profiles should be plotted.
- profile_list: int, optional
+ profile_indices:
+ Sequence of integer values specifying which profiles should be plotted.
+ profile_list:
Index of the profile list to be used for profiling.
- ratio_min: int, optional
+ ratio_min:
Exclude values where profile likelihood ratio is smaller than
ratio_min.
Returns
-------
- fvals: numeric list
- Including values that need to be plotted.
+ List of parameter values and ratios that need to be plotted.
"""
- # extract ratio values values from result
+ # extract ratio values from result
fvals = []
for i_par in range(0, len(result.profile_result.list[profile_list])):
if (
@@ -437,8 +429,7 @@ def process_result_list_profiles(
List of or single `pypesto.Result` after profiling.
profile_list_ids: int or list of ints, optional
Index or list of indices of the profile lists to be used for profiling.
- colors: list of RGBA colors
- colors for
+ colors: list of RGBA colors for plotting.
legends: list of str
Legends for plotting
@@ -506,6 +497,7 @@ def process_profile_indices(
else:
for ind in profile_indices:
if ind not in plottable_indices:
+ profile_indices = list(profile_indices)
profile_indices.remove(ind)
warn(
'Requested to plot profile for parameter index %i, '
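The added ``list(profile_indices)`` guards against ``profile_indices`` being a sequence without a ``remove`` method, such as a tuple or ``range``. A quick illustration::

    profile_indices = range(3)           # Sequence without .remove()
    # profile_indices.remove(2)          # would raise AttributeError
    profile_indices = list(profile_indices)
    profile_indices.remove(2)            # works: [0, 1]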
diff --git a/pypesto/visualize/reference_points.py b/pypesto/visualize/reference_points.py
index 13d8a03d9..c7aae3e5c 100644
--- a/pypesto/visualize/reference_points.py
+++ b/pypesto/visualize/reference_points.py
@@ -1,4 +1,4 @@
-from typing import List
+from typing import Optional, Sequence, Union
import numpy as np
@@ -14,7 +14,7 @@ class ReferencePoint(dict):
Attributes
----------
- x: ndarray
+ x:
Reference parameters.
fval: float
Function value, fun(x), for reference parameters.
@@ -28,7 +28,12 @@ class ReferencePoint(dict):
"""
def __init__(
- self, reference=None, x=None, fval=None, color=None, legend=None
+ self,
+ reference: Union[None, dict, tuple, "ReferencePoint"] = None,
+ x: Optional[Sequence] = None,
+ fval: Optional[float] = None,
+ color=None,
+ legend: Optional[str] = None,
):
super().__init__()
@@ -104,19 +109,18 @@ def __getattr__(self, key):
__delattr__ = dict.__delitem__
-def assign_colors(ref):
+def assign_colors(ref: Sequence[ReferencePoint]) -> Sequence[ReferencePoint]:
"""
Assign colors to reference points, depending on user settings.
Parameters
----------
- ref: list of ReferencePoint
+ ref:
Reference points, which need to get their color property filled
Returns
-------
- ref: list of ReferencePoint
- Reference points, which got their color property filled
+ Reference points, which got their color property filled
"""
# loop over reference points
auto_color_count = 0
@@ -141,7 +145,7 @@ def assign_colors(ref):
def create_references(
references=None, x=None, fval=None, color=None, legend=None
-) -> List[ReferencePoint]:
+) -> list[ReferencePoint]:
"""
Create a list of reference point objects from user inputs.
diff --git a/setup.cfg b/setup.cfg
index 327aa82d2..64f714e15 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -76,6 +76,7 @@ all =
%(pymc)s
%(aesara)s
%(jax)s
+ %(julia)s
%(emcee)s
%(dynesty)s
%(mltools)s
diff --git a/test/base/test_history.py b/test/base/test_history.py
index 20ad64684..8338206aa 100644
--- a/test/base/test_history.py
+++ b/test/base/test_history.py
@@ -9,6 +9,7 @@
import numpy as np
import pytest
import scipy.optimize as so
+from numpy.testing import assert_array_almost_equal
import pypesto
import pypesto.optimize as optimize
@@ -716,3 +717,51 @@ def test_trim_history():
fval_trimmed_man.append(fval_i)
fval_current = fval_i
assert fval_trace_trimmed == fval_trimmed_man
+
+
+def test_hdf5_history_from_other(history: pypesto.HistoryBase):
+ """Check that we can copy different histories to HDF5 and that the re-loaded history matches the original one."""
+ hdf5_file = tempfile.mkstemp(suffix='.h5')[1]
+ pypesto.Hdf5History.from_history(history, hdf5_file, id_="0")
+
+ # write a second time to test `overwrite` argument
+ with pytest.raises(RuntimeError, match="already exists"):
+ pypesto.Hdf5History.from_history(
+ history, hdf5_file, id_="0", overwrite=False
+ )
+ copied = pypesto.Hdf5History.from_history(
+ history, hdf5_file, id_="0", overwrite=True
+ )
+
+ assert copied.n_fval == history.n_fval
+ assert copied.n_grad == history.n_grad
+ assert copied.n_hess == history.n_hess
+ assert copied.n_res == history.n_res
+ assert copied.n_sres == history.n_sres
+ assert copied.exitflag == history.exitflag
+ assert copied.message == history.message
+ assert copied.start_time == history.start_time
+
+ if history.implements_trace():
+ assert_array_almost_equal(copied.get_x_trace(), history.get_x_trace())
+ assert_array_almost_equal(
+ copied.get_fval_trace(), history.get_fval_trace()
+ )
+ assert_array_almost_equal(
+ copied.get_grad_trace(), history.get_grad_trace()
+ )
+ assert_array_almost_equal(
+ copied.get_time_trace(), history.get_time_trace()
+ )
+ assert_array_almost_equal(
+ copied.get_res_trace(), history.get_res_trace()
+ )
+ assert_array_almost_equal(
+ copied.get_sres_trace(), history.get_sres_trace()
+ )
+ assert_array_almost_equal(
+ copied.get_chi2_trace(), history.get_chi2_trace()
+ )
+ assert_array_almost_equal(
+ copied.get_schi2_trace(), history.get_schi2_trace()
+ )
diff --git a/test/optimize/test_optimize.py b/test/optimize/test_optimize.py
index 28d6a670b..00b160314 100644
--- a/test/optimize/test_optimize.py
+++ b/test/optimize/test_optimize.py
@@ -18,6 +18,13 @@
import pypesto
import pypesto.optimize as optimize
+from pypesto.optimize.ess import (
+ CESSOptimizer,
+ ESSOptimizer,
+ SacessFidesFactory,
+ SacessOptimizer,
+ get_default_ess_options,
+)
from pypesto.optimize.util import assign_ids
from pypesto.store import read_result
@@ -47,11 +54,13 @@ def problem(request) -> pypesto.Problem:
'Powell',
'CG',
'BFGS',
+ 'dogleg',
'Newton-CG',
'L-BFGS-B',
'TNC',
'COBYLA',
'SLSQP',
+ 'trust-constr',
'trust-ncg',
'trust-exact',
'trust-krylov',
@@ -59,7 +68,7 @@ def problem(request) -> pypesto.Problem:
'ls_dogbox',
]
],
- # disabled: ,'trust-constr', 'ls_lm', 'dogleg'
+ # disabled: 'ls_lm' (ValueError when passing bounds)
('ipopt', ''),
('dlib', ''),
('pyswarm', ''),
@@ -135,7 +144,10 @@ def problem(request) -> pypesto.Problem:
@pytest.fixture(
params=optimizers,
- ids=[f"{i}-{o[0]}" for i, o in enumerate(optimizers)],
+ ids=[
+ f"{i}-{o[0]}{'-' + str(o[1]) if isinstance(o[1], str) and o[1] else ''}"
+ for i, o in enumerate(optimizers)
+ ],
)
def optimizer(request):
return request.param
@@ -242,7 +254,8 @@ def get_optimizer(library, solver):
options = {'maxiter': 100}
if library == 'scipy':
- options['maxfun'] = options.pop('maxiter')
+ if solver == "TNC" or solver.startswith("ls_"):
+ options['maxfun'] = options.pop('maxiter')
optimizer = optimize.ScipyOptimizer(method=solver, options=options)
elif library == 'ipopt':
optimizer = optimize.IpoptOptimizer()
@@ -445,16 +458,12 @@ def test_history_beats_optimizer():
"ignore:Passing `startpoint_method` directly is deprecated.*:DeprecationWarning"
)
@pytest.mark.parametrize("ess_type", ["ess", "cess", "sacess"])
-@pytest.mark.parametrize("local_optimizer", [None, optimize.FidesOptimizer()])
+@pytest.mark.parametrize(
+ "local_optimizer",
+ [None, optimize.FidesOptimizer(), SacessFidesFactory()],
+)
@pytest.mark.flaky(reruns=3)
def test_ess(problem, local_optimizer, ess_type, request):
- from pypesto.optimize.ess import (
- CESSOptimizer,
- ESSOptimizer,
- SacessOptimizer,
- get_default_ess_options,
- )
-
if ess_type == "ess":
ess = ESSOptimizer(
dim_refset=10,
diff --git a/test/petab/test_amici_objective.py b/test/petab/test_amici_objective.py
index 1055bd446..537d07b47 100644
--- a/test/petab/test_amici_objective.py
+++ b/test/petab/test_amici_objective.py
@@ -49,6 +49,7 @@ def test_add_sim_grad_to_opt_grad():
assert np.allclose(expected, opt_grad)
+@pytest.mark.flaky(reruns=2)
def test_error_leastsquares_with_ssigma():
model_name = "Zheng_PNAS2012"
petab_problem = petab.Problem.from_yaml(
@@ -57,7 +58,9 @@ def test_error_leastsquares_with_ssigma():
petab_problem.model_name = model_name
importer = pypesto.petab.PetabImporter(petab_problem)
obj = importer.create_objective()
- problem = importer.create_problem(obj)
+ problem = importer.create_problem(
+ obj, startpoint_kwargs={'check_fval': True, 'check_grad': True}
+ )
optimizer = pypesto.optimize.ScipyOptimizer(
'ls_trf', options={'max_nfev': 50}
diff --git a/test/run_notebook.sh b/test/run_notebook.sh
index d7236e6dd..0059e8e24 100755
--- a/test/run_notebook.sh
+++ b/test/run_notebook.sh
@@ -25,8 +25,9 @@ nbs_1=(
'store.ipynb'
'synthetic_data.ipynb'
'hierarchical.ipynb'
- 'example_ordinal.ipynb'
- 'example_nonlinear_monotone.ipynb'
+ 'ordinal.ipynb'
+ 'censored.ipynb'
+ 'nonlinear_monotone.ipynb'
)
# Sampling notebooks