diff --git a/.github/actions/install-apt-dependencies/action.yml b/.github/actions/install-apt-dependencies/action.yml
new file mode 100644
index 0000000000..0474e5befa
--- /dev/null
+++ b/.github/actions/install-apt-dependencies/action.yml
@@ -0,0 +1,16 @@
+name: Install apt dependencies
+description: Install apt dependencies for the AMICI Python package
+runs:
+ using: "composite"
+ steps:
+ - run: |
+ sudo apt-get update \
+ && sudo apt-get install -y \
+ g++ \
+ libatlas-base-dev \
+ libboost-chrono-dev \
+ libboost-math-dev \
+ libboost-serialization-dev \
+ libhdf5-serial-dev \
+ swig
+ shell: bash
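(Not part of the patch: a minimal consumer sketch for this composite action. Local actions resolve against the checked-out tree, so `actions/checkout` must run first; the job name and runner below are hypothetical.)

```yaml
jobs:
  example:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3
      - name: Install apt dependencies
        uses: ./.github/actions/install-apt-dependencies
```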
diff --git a/.github/actions/setup-amici-cpp/action.yml b/.github/actions/setup-amici-cpp/action.yml
new file mode 100644
index 0000000000..09ec9311bf
--- /dev/null
+++ b/.github/actions/setup-amici-cpp/action.yml
@@ -0,0 +1,44 @@
+name: Set up AMICI C++
+description: |
+ Build the AMICI C++ interface and set things up for coverage analysis.
+ (Currently Ubuntu-only).
+
+runs:
+ using: "composite"
+ steps:
+ # BioNetGen Path
+ - run: echo "BNGPATH=${GITHUB_WORKSPACE}/ThirdParty/BioNetGen-2.7.0" >> $GITHUB_ENV
+ shell: bash
+
+ # an empty value for AMICI_PARALLEL_COMPILE means: use all available cores
+ - run: echo "AMICI_PARALLEL_COMPILE=" >> $GITHUB_ENV
+ shell: bash
+
+ # enable coverage
+ - run: echo "ENABLE_GCOV_COVERAGE=TRUE" >> $GITHUB_ENV
+ shell: bash
+
+ - name: Set up Sonar tools
+ uses: ./.github/actions/setup-sonar-tools
+
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
+
+ - name: Install additional apt dependencies
+ run: |
+ sudo apt-get update \
+ && sudo apt-get install -y \
+ cmake \
+ python3-venv \
+ lcov
+ shell: bash
+
+ - name: Build AMICI dependencies
+ run: scripts/buildDependencies.sh
+ shell: bash
+
+ - name: Build AMICI
+ run: scripts/buildAmici.sh
+ shell: bash
+ env:
+ CI_SONARCLOUD: "TRUE"
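(Sketch of the intended wiring, mirroring the refactored jobs in test_python_cplusplus.yml below: Python setup and checkout precede the action, and AMICI_DIR is exported before the build scripts run.)

```yaml
steps:
  - uses: actions/setup-python@v4
    with:
      python-version: "3.9"
  - uses: actions/checkout@v3
  - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
  - name: Set up AMICI C++ libraries
    uses: ./.github/actions/setup-amici-cpp
```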
diff --git a/.github/actions/setup-doxygen/action.yml b/.github/actions/setup-doxygen/action.yml
new file mode 100644
index 0000000000..f6a62181c1
--- /dev/null
+++ b/.github/actions/setup-doxygen/action.yml
@@ -0,0 +1,20 @@
+name: Set up doxygen
+description: |
+ Download, build, and install doxygen.
+
+runs:
+ using: "composite"
+ steps:
+ - name: Install apt dependencies for doxygen
+ run: |
+ sudo apt-get update \
+ && sudo apt-get install -y \
+ bison \
+ ragel \
+ graphviz \
+ texlive-latex-extra
+ shell: bash
+
+ - name: Download and build doxygen
+ run: sudo scripts/downloadAndBuildDoxygen.sh
+ shell: bash
diff --git a/.github/actions/setup-sonar-tools/action.yml b/.github/actions/setup-sonar-tools/action.yml
new file mode 100644
index 0000000000..d791c120bf
--- /dev/null
+++ b/.github/actions/setup-sonar-tools/action.yml
@@ -0,0 +1,26 @@
+name: Set up Sonar tools
+description: Download and install sonar-scanner and build-wrapper
+runs:
+ using: "composite"
+ steps:
+ - run: echo "SONAR_SCANNER_VERSION=5.0.1.3006" >> $GITHUB_ENV
+ shell: bash
+ - run: echo "SONAR_SCANNER_HOME=${HOME}/.sonar/sonar-scanner-$SONAR_SCANNER_VERSION-linux" >> $GITHUB_ENV
+ shell: bash
+ - run: echo "SONAR_SCANNER_OPTS=-server" >> $GITHUB_ENV
+ shell: bash
+ - run: echo "${SONAR_SCANNER_HOME}/bin" >> $GITHUB_PATH
+ shell: bash
+ - run: echo "${HOME}/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH
+ shell: bash
+
+ - name: Install sonarcloud tools
+ run: |
+ sudo apt-get install -y nodejs curl unzip \
+ && curl --create-dirs -sSLo $HOME/.sonar/sonar-scanner.zip \
+ https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-$SONAR_SCANNER_VERSION-linux.zip \
+ && unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ \
+ && curl --create-dirs -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip \
+ https://sonarcloud.io/static/cpp/build-wrapper-linux-x86.zip \
+ && unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/
+ shell: bash
diff --git a/.github/actions/setup-swig/action.yml b/.github/actions/setup-swig/action.yml
new file mode 100644
index 0000000000..0eb8a04473
--- /dev/null
+++ b/.github/actions/setup-swig/action.yml
@@ -0,0 +1,20 @@
+name: Set up SWIG
+description: |
+ Download and build SWIG and set the SWIG environment variable to the path of
+ the SWIG executable.
+
+inputs:
+ swig_version:
+ description: 'SWIG version to build'
+ required: false
+ default: '4.1.1'
+
+runs:
+ using: "composite"
+ steps:
+ - name: Download and build SWIG
+ run: scripts/downloadAndBuildSwig.sh
+ shell: bash
+
+ - run: echo "SWIG=${AMICI_DIR}/ThirdParty/swig-${{ inputs.swig_version }}/install/bin/swig" >> $GITHUB_ENV
+ shell: bash
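(Usage sketch: the `swig_version` input only needs to be passed to override the 4.1.1 default. Since the exported `SWIG` path is derived from this input, it should match the version that scripts/downloadAndBuildSwig.sh actually builds.)

```yaml
- name: Set up SWIG
  uses: ./.github/actions/setup-swig
  with:
    swig_version: "4.1.1"
```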
diff --git a/.github/workflows/deploy_branch.yml b/.github/workflows/deploy_branch.yml
index 77883907d3..73294286a1 100644
--- a/.github/workflows/deploy_branch.yml
+++ b/.github/workflows/deploy_branch.yml
@@ -21,12 +21,10 @@ jobs:
with:
fetch-depth: 20
- - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
- - run: echo "SWIG=${AMICI_DIR}/ThirdParty/swig-4.0.1/install/bin/swig" >> $GITHUB_ENV
+ - name: Set up SWIG
+ uses: ./.github/actions/setup-swig
- - name: Build swig4
- run: |
- sudo scripts/downloadAndBuildSwig.sh
+ - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
- name: Create AMICI sdist
run: |
diff --git a/.github/workflows/deploy_release.yml b/.github/workflows/deploy_release.yml
index ce493533e7..8fe931763c 100644
--- a/.github/workflows/deploy_release.yml
+++ b/.github/workflows/deploy_release.yml
@@ -24,16 +24,13 @@ jobs:
with:
fetch-depth: 20
- - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
- - run: echo "SWIG=${AMICI_DIR}/ThirdParty/swig-4.0.1/install/bin/swig" >> $GITHUB_ENV
+ - name: Set up SWIG
+ uses: ./.github/actions/setup-swig
- - name: Build swig4
- run: |
- sudo scripts/downloadAndBuildSwig.sh
+ - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
- name: sdist
- run: |
- scripts/buildSdist.sh
+ run: scripts/buildSdist.sh
- name: Publish a Python distribution to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
diff --git a/.github/workflows/test_benchmark_collection_models.yml b/.github/workflows/test_benchmark_collection_models.yml
index 9cfa50ea2a..bf9509e5ce 100644
--- a/.github/workflows/test_benchmark_collection_models.yml
+++ b/.github/workflows/test_benchmark_collection_models.yml
@@ -37,18 +37,15 @@ jobs:
with:
fetch-depth: 20
- # install dependencies
- - name: apt
- run: |
- sudo apt-get update \
- && sudo apt-get install -y swig libatlas-base-dev
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
- run: echo "${HOME}/.local/bin/" >> $GITHUB_PATH
# install AMICI
- name: Create AMICI sdist
- run: |
- pip3 install build && cd python/sdist && python3 -m build --sdist
+ run: pip3 install build && cd python/sdist && python3 -m build --sdist
+
- name: Install AMICI sdist
run: |
pip3 install --user petab[vis] && \
diff --git a/.github/workflows/test_doc.yml b/.github/workflows/test_doc.yml
index 98c023ba79..07e0afdcc8 100644
--- a/.github/workflows/test_doc.yml
+++ b/.github/workflows/test_doc.yml
@@ -16,7 +16,6 @@ on:
jobs:
doxygen:
name: Test Doxygen
-
runs-on: ubuntu-22.04
strategy:
@@ -32,26 +31,14 @@ jobs:
- uses: actions/checkout@v3
- run: git fetch --prune --unshallow
- - name: apt
- run: |
- sudo apt-get update \
- && sudo apt-get install -y \
- bison \
- ragel \
- graphviz \
- texlive-latex-extra
-
- - name: Build doxygen
- run: |
- sudo scripts/downloadAndBuildDoxygen.sh
+ - name: Set up doxygen
+ uses: ./.github/actions/setup-doxygen
- name: Run doxygen
- run: |
- scripts/run-doxygen.sh
+ run: scripts/run-doxygen.sh
sphinx:
name: Test Sphinx
-
runs-on: ubuntu-22.04
strategy:
@@ -68,27 +55,22 @@ jobs:
- run: git fetch --prune --unshallow
- run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
- - run: echo "SWIG=${AMICI_DIR}/ThirdParty/swig-4.1.1/install/bin/swig" >> $GITHUB_ENV
- - name: Build doxygen
- run: |
- sudo scripts/downloadAndBuildDoxygen.sh
+ - name: Set up doxygen
+ uses: ./.github/actions/setup-doxygen
- # install amici dependencies
- - name: apt
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
+
+ - name: Install further dependencies
run: |
sudo apt-get update \
&& sudo apt-get install -y \
- g++ \
- libatlas-base-dev \
- libboost-serialization-dev \
pandoc \
- python3-venv \
+ python3-venv
- - name: Build swig
- run: |
- sudo scripts/downloadAndBuildSwig.sh
+ - name: Set up SWIG
+ uses: ./.github/actions/setup-swig
- - name: sphinx
- run: |
- scripts/run-sphinx.sh
+ - name: Run sphinx
+ run: scripts/run-sphinx.sh
diff --git a/.github/workflows/test_install.yml b/.github/workflows/test_install.yml
index 6721008599..be74cfa4c6 100644
--- a/.github/workflows/test_install.yml
+++ b/.github/workflows/test_install.yml
@@ -22,33 +22,26 @@ jobs:
- run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
- # install amici dependencies
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
+
- name: apt
run: |
sudo apt-get update \
&& sudo apt-get install -y \
- cmake \
- g++ \
- libatlas-base-dev \
- libboost-serialization-dev \
- libhdf5-serial-dev \
- swig
+ cmake
- name: Build suitesparse
- run: |
- scripts/buildSuiteSparse.sh
+ run: scripts/buildSuiteSparse.sh
- name: Build sundials
- run: |
- scripts/buildSundials.sh
+ run: scripts/buildSundials.sh
- name: Build AMICI
- run: |
- scripts/buildAmici.sh
+ run: scripts/buildAmici.sh
- name: Install python archive
- run: |
- scripts/installAmiciArchive.sh
+ run: scripts/installAmiciArchive.sh
sdist_ubuntu:
name: sdist Install Ubuntu
@@ -70,28 +63,17 @@ jobs:
- run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
- # install amici dependencies
- - name: apt
- run: |
- sudo apt-get update \
- && sudo apt-get install -y \
- g++ \
- libatlas-base-dev \
- libboost-serialization-dev \
- libhdf5-serial-dev \
- swig
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
- name: Create AMICI sdist
- run: |
- scripts/buildSdist.sh
+ run: scripts/buildSdist.sh
- name: Install python sdist
- run: |
- pip3 install -v --user $(ls -t python/sdist/dist/amici-*.tar.gz | head -1)
+ run: pip3 install -v --user $(ls -t python/sdist/dist/amici-*.tar.gz | head -1)
- name: Test import
- run: |
- python -m amici
+ run: python -m amici
sdist_macos:
@@ -124,13 +106,10 @@ jobs:
&& echo CPPFLAGS="-I /usr/local/Cellar/boost/1.81.0_1/include/" >> $GITHUB_ENV
- name: Create AMICI sdist
- run: |
- scripts/buildSdist.sh
+ run: scripts/buildSdist.sh
- name: Install python sdist
- run: |
- pip3 install -v --user $(ls -t python/sdist/dist/amici-*.tar.gz | head -1)
+ run: pip3 install -v --user $(ls -t python/sdist/dist/amici-*.tar.gz | head -1)
- name: Test import
- run: |
- python -m amici
+ run: python -m amici
diff --git a/.github/workflows/test_performance.yml b/.github/workflows/test_performance.yml
index 871421ffd0..de9dd686d9 100644
--- a/.github/workflows/test_performance.yml
+++ b/.github/workflows/test_performance.yml
@@ -35,11 +35,9 @@ jobs:
with:
fetch-depth: 20
- # install dependencies
- - name: apt
- run: |
- sudo apt-get update \
- && sudo apt-get install -y swig libatlas-base-dev
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
+
- run: pip3 install petab shyaml build
- run: echo "${HOME}/.local/bin/" >> $GITHUB_PATH
diff --git a/.github/workflows/test_petab_test_suite.yml b/.github/workflows/test_petab_test_suite.yml
index 70acd254df..d5c4dc4fe8 100644
--- a/.github/workflows/test_petab_test_suite.yml
+++ b/.github/workflows/test_petab_test_suite.yml
@@ -34,18 +34,17 @@ jobs:
with:
fetch-depth: 20
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
+
# install dependencies
- name: apt
run: |
sudo apt-get update \
- && sudo apt-get install -y \
- swig \
- libatlas-base-dev \
- python3-venv
+ && sudo apt-get install -y python3-venv
- name: Build BNGL
- run: |
- scripts/buildBNGL.sh
+ run: scripts/buildBNGL.sh
- run: |
echo "${HOME}/.local/bin/" >> $GITHUB_PATH
@@ -54,8 +53,7 @@ jobs:
# install AMICI
- name: Install python package
- run: |
- scripts/installAmiciSource.sh
+ run: scripts/installAmiciSource.sh
- name: Install petab
run: |
diff --git a/.github/workflows/test_pypi.yml b/.github/workflows/test_pypi.yml
index f8150aa42e..4f3533850f 100644
--- a/.github/workflows/test_pypi.yml
+++ b/.github/workflows/test_pypi.yml
@@ -11,7 +11,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- python-version: [3.9, 3.9, '3.10']
+ python-version: ["3.9", "3.10", "3.11"]
os: [ubuntu-22.04, macos-latest]
runs-on: ${{ matrix.os }}
diff --git a/.github/workflows/test_python_cplusplus.yml b/.github/workflows/test_python_cplusplus.yml
index 416252ee4b..a531d2db2c 100644
--- a/.github/workflows/test_python_cplusplus.yml
+++ b/.github/workflows/test_python_cplusplus.yml
@@ -8,20 +8,13 @@ on:
- master
jobs:
- build:
- name: Tests Ubuntu
-
- # TODO: prepare image with more deps preinstalled
+ ubuntu-cpp-python-tests:
+ name: C++/Python tests Ubuntu
runs-on: ubuntu-22.04
- env:
- AMICI_PARALLEL_COMPILE: ""
- ENABLE_GCOV_COVERAGE: "TRUE"
- CI_SONARCLOUD: "TRUE"
-
strategy:
matrix:
- python-version: [ 3.9 ]
+ python-version: [ "3.9" ]
steps:
- name: Set up Python ${{ matrix.python-version }}
@@ -33,70 +26,33 @@ jobs:
- run: git fetch --prune --unshallow
- run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
- - run: echo "BNGPATH=${GITHUB_WORKSPACE}/ThirdParty/BioNetGen-2.7.0" >> $GITHUB_ENV
-
- # sonar cloud
- - run: echo "SONAR_SCANNER_VERSION=5.0.1.3006" >> $GITHUB_ENV
- - run: echo "SONAR_SCANNER_HOME=${HOME}/.sonar/sonar-scanner-$SONAR_SCANNER_VERSION-linux" >> $GITHUB_ENV
- - run: echo "SONAR_SCANNER_OPTS=-server" >> $GITHUB_ENV
- - run: echo "${SONAR_SCANNER_HOME}/bin" >> $GITHUB_PATH
- - run: echo "${HOME}/.sonar/build-wrapper-linux-x86" >> $GITHUB_PATH
-
- # TODO: add to ci image
- - name: Install sonarcloud tools
- run: |
- sudo apt-get install nodejs curl unzip \
- && curl --create-dirs -sSLo $HOME/.sonar/sonar-scanner.zip \
- https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-$SONAR_SCANNER_VERSION-linux.zip \
- && unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ \
- && curl --create-dirs -sSLo $HOME/.sonar/build-wrapper-linux-x86.zip \
- https://sonarcloud.io/static/cpp/build-wrapper-linux-x86.zip \
- && unzip -o $HOME/.sonar/build-wrapper-linux-x86.zip -d $HOME/.sonar/ \
- # install amici dependencies
- - name: apt
- run: |
- sudo apt-get update \
- && sudo apt-get install -y \
- cmake \
- g++ \
- libatlas-base-dev \
- libboost-serialization-dev \
- libboost-chrono-dev \
- libhdf5-serial-dev \
- python3-venv \
- swig \
- lcov \
- libboost-math-dev
-
- - name: Build AMICI dependencies
- run: |
- scripts/buildDependencies.sh
-
- - name: Build AMICI
- run: |
- CI_SONARCLOUD=TRUE scripts/buildAmici.sh
+ - name: Set up AMICI C++ libraries
+ uses: ./.github/actions/setup-amici-cpp
- name: C++ tests
- run: |
- scripts/run-cpp-tests.sh
+ run: scripts/run-cpp-tests.sh
- name: Install python package
- run: |
- scripts/installAmiciSource.sh
+ run: scripts/installAmiciSource.sh
- - name: Python tests
+ - name: Check OpenMP support
+ run: source build/venv/bin/activate && python -c "import amici; import sys; sys.exit(not amici.compiledWithOpenMP())"
+
+ - name: Python tests (part 1)
run: |
source build/venv/bin/activate \
&& pytest \
--ignore-glob=*petab* \
--ignore-glob=*test_splines.py \
+ --ignore-glob=*test_splines_short.py \
+ --ignore-glob=*test_pysb.py \
--cov=amici \
--cov-report=xml:"${AMICI_DIR}/build/coverage_py.xml" \
--cov-append \
+ --durations=10 \
${AMICI_DIR}/python/tests
-
- name: Python tests splines
if: ${{ github.base_ref == 'master' || github.event.merge_group.base_ref == 'master'}}
run: |
@@ -105,20 +61,80 @@ jobs:
--cov=amici \
--cov-report=xml:"${AMICI_DIR}/build/coverage_py.xml" \
--cov-append \
+ --durations=10 \
${AMICI_DIR}/python/tests/test_splines.py
- - name: Install notebook dependencies
+ - name: Codecov Python
+ uses: codecov/codecov-action@v3.1.0
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ file: build/coverage_py.xml
+ flags: python
+ fail_ci_if_error: true
+ verbose: true
+
+ - name: Capture coverage info (lcov)
run: |
- source build/venv/bin/activate \
- && pip install jax[cpu]
+ lcov --compat-libtool --no-external \
+ -d ${AMICI_DIR}/build/CMakeFiles/amici.dir/src \
+ -b ${AMICI_DIR} -c -o coverage_cpp.info \
+ && lcov --compat-libtool --no-external \
+ -d ${AMICI_DIR}/python/sdist/build/temp_amici/CMakeFiles/amici.dir/src \
+ -b ${AMICI_DIR}/python/sdist -c -o coverage_py.info \
+ && lcov -a coverage_cpp.info -a coverage_py.info -o coverage.info
- - name: example notebooks
+ - name: Codecov CPP
+ uses: codecov/codecov-action@v3.1.0
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ file: coverage.info
+ flags: cpp
+ fail_ci_if_error: true
+
+ - name: Run sonar-scanner
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
run: |
- scripts/runNotebook.sh python/examples/example_*/
+ sonar-scanner \
+ -Dsonar.cfamily.build-wrapper-output=bw-output \
+ -Dsonar.projectVersion="$(git describe --abbrev=4 --dirty=-dirty --always --tags | tr -d '\n')"
- - name: doc notebooks
+ ubuntu-python-tests:
+ name: Python tests Ubuntu
+ runs-on: ubuntu-22.04
+
+ strategy:
+ matrix:
+ python-version: [ "3.9" ]
+
+ steps:
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - uses: actions/checkout@v3
+ - run: git fetch --prune --unshallow
+
+ - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
+
+ - name: Set up AMICI C++ libraries
+ uses: ./.github/actions/setup-amici-cpp
+
+ - name: Install python package
+ run: scripts/installAmiciSource.sh
+
+ - name: Python tests
run: |
- scripts/runNotebook.sh documentation/GettingStarted.ipynb
+ source build/venv/bin/activate \
+ && pytest \
+ --cov=amici \
+ --cov-report=xml:"${AMICI_DIR}/build/coverage_py.xml" \
+ --cov-append \
+ --durations=10 \
+ ${AMICI_DIR}/python/tests/test_pysb.py \
+ ${AMICI_DIR}/python/tests/test_splines_short.py
- name: Codecov Python
uses: codecov/codecov-action@v3.1.0
@@ -129,13 +145,13 @@ jobs:
fail_ci_if_error: true
verbose: true
- - name: lcov
+ - name: Capture coverage info (lcov)
run: |
lcov --compat-libtool --no-external \
-d ${AMICI_DIR}/build/CMakeFiles/amici.dir/src \
-b ${AMICI_DIR} -c -o coverage_cpp.info \
&& lcov --compat-libtool --no-external \
- -d ${AMICI_DIR}/python/sdist/build/$(python -c "import sys, sysconfig; print(f'temp.{sysconfig.get_platform()}-{sys.implementation.cache_tag}_amici')")/CMakeFiles/amici.dir/src \
+ -d ${AMICI_DIR}/python/sdist/build/temp_amici/CMakeFiles/amici.dir/src \
-b ${AMICI_DIR}/python/sdist -c -o coverage_py.info \
&& lcov -a coverage_cpp.info -a coverage_py.info -o coverage.info
@@ -144,7 +160,7 @@ jobs:
with:
token: ${{ secrets.CODECOV_TOKEN }}
file: coverage.info
- flags: cpp
+ flags: cpp_python
fail_ci_if_error: true
- name: Run sonar-scanner
@@ -156,9 +172,47 @@ jobs:
-Dsonar.cfamily.build-wrapper-output=bw-output \
-Dsonar.projectVersion="$(git describe --abbrev=4 --dirty=-dirty --always --tags | tr -d '\n')"
+
+ ubuntu-notebook-tests:
+ name: Notebook tests Ubuntu
+ runs-on: ubuntu-22.04
+
+ strategy:
+ matrix:
+ python-version: [ "3.9" ]
+
+ steps:
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - uses: actions/checkout@v3
+ - run: git fetch --prune --unshallow
+
+ - run: echo "AMICI_DIR=$(pwd)" >> $GITHUB_ENV
+
+ - name: Set up AMICI C++ libraries
+ uses: ./.github/actions/setup-amici-cpp
+
+ - name: Install python package
+ run: scripts/installAmiciSource.sh
+
+ - name: Install notebook dependencies
+ run: |
+ source build/venv/bin/activate \
+ && pip install jax[cpu]
+
+ - name: example notebooks
+ run: scripts/runNotebook.sh python/examples/example_*/
+
+ - name: doc notebooks
+ run: scripts/runNotebook.sh documentation/GettingStarted.ipynb
+
+ # TODO: Include notebooks in coverage report
+
osx:
name: Tests OSX
-
runs-on: macos-latest
steps:
@@ -174,32 +228,27 @@ jobs:
- run: echo "BNGPATH=${AMICI_DIR}/ThirdParty/BioNetGen-2.7.0" >> $GITHUB_ENV
# Ensure CMake is using the python version that we will use for the python tests later on
- run: echo "PYTHON_EXECUTABLE=${Python3_ROOT_DIR}/bin/python3" >> $GITHUB_ENV
+ - run: echo "OpenMP_ROOT=$(brew --prefix)/opt/libomp" >> $GITHUB_ENV
+ - run: echo "BOOST_ROOT=$(brew --prefix)/opt/boost" >> $GITHUB_ENV
# install amici dependencies
- name: homebrew
- run: |
- brew install hdf5 swig gcc cppcheck libomp boost \
- && brew ls -v boost \
- && brew ls -v libomp \
- && echo LDFLAGS="-L/usr/local/lib/ -L/usr/local/Cellar/boost/1.81.0_1/lib/" >> $GITHUB_ENV \
- && echo CPPFLAGS="-I /usr/local/Cellar/boost/1.81.0_1/include/" >> $GITHUB_ENV
+ run: brew install hdf5 swig gcc cppcheck libomp boost
- name: Build AMICI
- run: |
- scripts/buildAll.sh
+ run: scripts/buildAll.sh
- name: Install python package
- run: |
- scripts/installAmiciSource.sh
+ run: scripts/installAmiciSource.sh
+
+ - name: Check OpenMP support
+ run: source build/venv/bin/activate && python -c "import amici; import sys; sys.exit(not amici.compiledWithOpenMP())"
- name: cppcheck
- run: |
- scripts/run-cppcheck.sh
+ run: scripts/run-cppcheck.sh
- name: Python tests
- run: |
- scripts/run-python-tests.sh
+ run: scripts/run-python-tests.sh
- name: C++ tests
- run: |
- scripts/run-cpp-tests.sh
+ run: scripts/run-cpp-tests.sh
diff --git a/.github/workflows/test_python_ver_matrix.yml b/.github/workflows/test_python_ver_matrix.yml
index 59dcf91041..866a3fc0f7 100644
--- a/.github/workflows/test_python_ver_matrix.yml
+++ b/.github/workflows/test_python_ver_matrix.yml
@@ -41,23 +41,14 @@ jobs:
with:
fetch-depth: 20
- # install dependencies
- - name: apt
- run: |
- sudo apt-get update \
- && sudo apt-get install -y \
- swig \
- libatlas-base-dev \
- libhdf5-serial-dev \
- libboost-math-dev
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
# install AMICI
- name: Build BNGL
- run: |
- scripts/buildBNGL.sh
+ run: scripts/buildBNGL.sh
- name: Install python package
- run: |
- scripts/installAmiciSource.sh
+ run: scripts/installAmiciSource.sh
- name: Python tests
run: |
diff --git a/.github/workflows/test_sbml_semantic_test_suite.yml b/.github/workflows/test_sbml_semantic_test_suite.yml
index 68bfaed8e3..0fde56b8f9 100644
--- a/.github/workflows/test_sbml_semantic_test_suite.yml
+++ b/.github/workflows/test_sbml_semantic_test_suite.yml
@@ -40,10 +40,10 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 1
- - name: apt
- run: |
- sudo apt-get update \
- && sudo apt-get install -y swig4.0 libatlas-base-dev
+
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
+
- run: AMICI_PARALLEL_COMPILE=2 ./scripts/installAmiciSource.sh
- run: AMICI_PARALLEL_COMPILE=2 ./scripts/run-SBMLTestsuite.sh ${{ matrix.cases }}
diff --git a/.github/workflows/test_valgrind.yml b/.github/workflows/test_valgrind.yml
index 1138e663c0..32eea2e0c0 100644
--- a/.github/workflows/test_valgrind.yml
+++ b/.github/workflows/test_valgrind.yml
@@ -14,8 +14,6 @@ on:
jobs:
valgrind_cpp:
name: Valgrind C++
-
- # TODO: prepare image with more deps preinstalled
runs-on: ubuntu-22.04
strategy:
@@ -34,28 +32,23 @@ jobs:
- uses: actions/checkout@v3
- run: git fetch --prune --unshallow
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
+
# install amici dependencies
- name: apt
run: |
sudo apt-get update \
&& sudo apt-get install -y \
cmake \
- g++ \
- libatlas-base-dev \
- libboost-serialization-dev \
- libhdf5-serial-dev \
python3-venv \
- swig \
- valgrind \
- libboost-math-dev
+ valgrind
- name: Build AMICI
- run: |
- scripts/buildAll.sh
+ run: scripts/buildAll.sh
- name: C++ tests / Valgrind
- run: |
- scripts/run-valgrind-cpp.sh
+ run: scripts/run-valgrind-cpp.sh
valgrind_python:
name: Valgrind Python
@@ -78,29 +71,23 @@ jobs:
- uses: actions/checkout@v3
- run: git fetch --prune --unshallow
+ - name: Install apt dependencies
+ uses: ./.github/actions/install-apt-dependencies
+
# install amici dependencies
- name: apt
run: |
sudo apt-get update \
&& sudo apt-get install -y \
cmake \
- g++ \
- libatlas-base-dev \
- libboost-serialization-dev \
- libhdf5-serial-dev \
python3-venv \
- swig \
- valgrind \
- libboost-math-dev
+ valgrind
- name: Build AMICI
- run: |
- scripts/buildAll.sh
+ run: scripts/buildAll.sh
- name: Install python package
- run: |
- scripts/installAmiciSource.sh
+ run: scripts/installAmiciSource.sh
- name: Python tests / Valgrind
- run: |
- scripts/run-valgrind-py.sh
+ run: scripts/run-valgrind-py.sh
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index e0ea39c7c2..521ff54f85 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,7 @@ repos:
hooks:
- id: isort
name: isort (python)
- args: ["--profile", "black", "--filter-files"]
+ args: ["--profile", "black", "--filter-files", "--line-length", "79"]
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
@@ -17,12 +17,14 @@ repos:
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/psf/black
- rev: 23.3.0
+ rev: 23.7.0
hooks:
- - id: black
+ - id: black-jupyter
# It is recommended to specify the latest version of Python
# supported by your project here, or alternatively use
# pre-commit's default_language_version, see
# https://pre-commit.com/#top_level-default_language_version
language_version: python3.11
+ args: ["--line-length", "79"]
+
exclude: '^(ThirdParty|models)/'
diff --git a/CHANGELOG.md b/CHANGELOG.md
index aa46196e0e..4b511d92d0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,46 @@
## v0.X Series
+### v0.20.0 (2023-11-23)
+
+**Fixes**
+
+* Fixed CMake cmake_minimum_required deprecation warning
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2183
+* Fixed misleading preequilibration failure messages
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2181
+* Removed setuptools<64 restriction
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2180
+* Fixed ExpData equality operator for Python
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2194
+* Enabled deepcopy for ExpData(View)
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2196
+* Allowed subsetting simulation conditions in simulate_petab
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2199
+* Set CMake CMP0144 to prevent warning
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2209
+
+**Features**
+
+* Possibility to evaluate and plot symbolic expressions based on simulation results
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2152
+* Easier access to timepoints via ExpDataView
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2193
+* Nicer `__repr__` for ReturnDataView
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2192
+
+**Documentation**
+
+* Added installation instructions for Arch Linux
+ by @stephanmg in https://github.com/AMICI-dev/AMICI/pull/2173
+* Updated reference list
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2172
+* Installation guide: optional requirements
+ by @dweindl in https://github.com/AMICI-dev/AMICI/pull/2207
+
+**Full Changelog**: https://github.com/AMICI-dev/AMICI/compare/v0.19.0...v0.20.0
+
+
### v0.19.0 (2023-08-26)
**Features**
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a31104b47e..d6f08a5097 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,6 +4,11 @@
cmake_minimum_required(VERSION 3.15)
cmake_policy(VERSION 3.15...3.27)
+# cmake >=3.27
+if(POLICY CMP0144)
+ cmake_policy(SET CMP0144 NEW)
+endif(POLICY CMP0144)
+
project(amici)
# misc options
diff --git a/LICENSE.md b/LICENSE.md
index d504f8447a..4dadbc28a9 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -27,7 +27,7 @@ The AMICI logo is released under the Creative Commons CC0 1.0 Universal
* Parts of the *SUNDIALS* solver suite are redistributed under the BSD 3-Clause
License (BSD-3-Clause) with terms given in
- `ThirdParty/SuiteSparse/LICENSE.txt`
+ `ThirdParty/sundials/LICENSE`
* Parts of *SuiteSparse* are redistributed under the various licenses with the
terms given in `ThirdParty/SuiteSparse/LICENSE.txt`
* *gsl-lite* is redistributed under the MIT License (MIT) with the terms given
diff --git a/binder/overview.ipynb b/binder/overview.ipynb
index 9c4959f372..0a7ce81084 100644
--- a/binder/overview.ipynb
+++ b/binder/overview.ipynb
@@ -12,7 +12,7 @@
"\n",
" Brief intro to AMICI for first-time users.\n",
"\n",
- "* [Example \"steadystate\"](../python/examples/example_steadystate/ExampleSteadystate.ipynb)\n",
+ "* [SBML import, observation model, sensitivity analysis, data export and visualization](../python/examples/example_steadystate/ExampleSteadystate.ipynb)\n",
"\n",
" A more detailed introduction to the AMICI interface, demonstrating sensitivity analysis, various options, finite difference checks, ...\n",
"\n",
@@ -34,7 +34,8 @@
"\n",
"* [Interfacing JAX](../python/examples/example_jax/ExampleJax.ipynb)\n",
"\n",
- " Provides guidance on how to combine AMICI with differential programming frameworks such as JAX.\n"
+ " Provides guidance on how to combine AMICI with differential programming frameworks such as JAX.\n",
+ "\n",
"* [Efficient spline interpolation](../python/examples/example_splines/ExampleSplines.ipynb)\n",
"\n",
" Shows how to add annotated spline formulas to existing SBML models in order to speed up AMICI's model import.\n",
diff --git a/documentation/ExampleJax.ipynb b/documentation/ExampleJax.ipynb
index f572b14384..1899305b67 100644
--- a/documentation/ExampleJax.ipynb
+++ b/documentation/ExampleJax.ipynb
@@ -46,10 +46,10 @@
"output_type": "stream",
"text": [
"Cloning into 'tmp/benchmark-models'...\n",
- "remote: Enumerating objects: 336, done.\u001B[K\n",
- "remote: Counting objects: 100% (336/336), done.\u001B[K\n",
- "remote: Compressing objects: 100% (285/285), done.\u001B[K\n",
- "remote: Total 336 (delta 88), reused 216 (delta 39), pack-reused 0\u001B[K\n",
+ "remote: Enumerating objects: 336, done.\u001b[K\n",
+ "remote: Counting objects: 100% (336/336), done.\u001b[K\n",
+ "remote: Compressing objects: 100% (285/285), done.\u001b[K\n",
+ "remote: Total 336 (delta 88), reused 216 (delta 39), pack-reused 0\u001b[K\n",
"Receiving objects: 100% (336/336), 2.11 MiB | 7.48 MiB/s, done.\n",
"Resolving deltas: 100% (88/88), done.\n"
]
@@ -58,7 +58,8 @@
"source": [
"!git clone --depth 1 https://github.com/Benchmarking-Initiative/Benchmark-Models-PEtab.git tmp/benchmark-models || (cd tmp/benchmark-models && git pull)\n",
"from pathlib import Path\n",
- "folder_base = Path('.') / \"tmp\" / \"benchmark-models\" / \"Benchmark-Models\""
+ "\n",
+ "folder_base = Path(\".\") / \"tmp\" / \"benchmark-models\" / \"Benchmark-Models\""
]
},
{
@@ -77,6 +78,7 @@
"outputs": [],
"source": [
"import petab\n",
+ "\n",
"model_name = \"Boehm_JProteomeRes2014\"\n",
"yaml_file = folder_base / model_name / (model_name + \".yaml\")\n",
"petab_problem = petab.Problem.from_yaml(yaml_file)"
@@ -570,6 +572,7 @@
],
"source": [
"from amici.petab_import import import_petab_problem\n",
+ "\n",
"amici_model = import_petab_problem(petab_problem, force_compile=True)"
]
},
@@ -606,14 +609,16 @@
"source": [
"from amici.petab_objective import simulate_petab\n",
"import amici\n",
+ "\n",
"amici_solver = amici_model.getSolver()\n",
"amici_solver.setSensitivityOrder(amici.SensitivityOrder.first)\n",
"\n",
+ "\n",
"def amici_hcb_base(parameters: jnp.array):\n",
" return simulate_petab(\n",
- " petab_problem, \n",
- " amici_model, \n",
- " problem_parameters=dict(zip(petab_problem.x_free_ids, parameters)), \n",
+ " petab_problem,\n",
+ " amici_model,\n",
+ " problem_parameters=dict(zip(petab_problem.x_free_ids, parameters)),\n",
" scaled_parameters=True,\n",
" solver=amici_solver,\n",
" )"
@@ -635,13 +640,14 @@
"outputs": [],
"source": [
"def amici_hcb_llh(parameters: jnp.array):\n",
- " return amici_hcb_base(parameters)['llh']\n",
+ " return amici_hcb_base(parameters)[\"llh\"]\n",
+ "\n",
"\n",
"def amici_hcb_sllh(parameters: jnp.array):\n",
- " sllh = amici_hcb_base(parameters)['sllh']\n",
- " return jnp.asarray(tuple(\n",
- " sllh[par_id] for par_id in petab_problem.x_free_ids\n",
- " ))"
+ " sllh = amici_hcb_base(parameters)[\"sllh\"]\n",
+ " return jnp.asarray(\n",
+ " tuple(sllh[par_id] for par_id in petab_problem.x_free_ids)\n",
+ " )"
]
},
{
@@ -663,6 +669,8 @@
"from jax import custom_jvp\n",
"\n",
"import numpy as np\n",
+ "\n",
+ "\n",
"@custom_jvp\n",
"def jax_objective(parameters: jnp.array):\n",
" return hcb.call(\n",
@@ -695,7 +703,9 @@
" sllh = hcb.call(\n",
" amici_hcb_sllh,\n",
" parameters,\n",
- " result_shape=jax.ShapeDtypeStruct((petab_problem.parameter_df.estimate.sum(),), np.float64),\n",
+ " result_shape=jax.ShapeDtypeStruct(\n",
+ " (petab_problem.parameter_df.estimate.sum(),), np.float64\n",
+ " ),\n",
" )\n",
" return llh, sllh.dot(x_dot)"
]
@@ -717,19 +727,25 @@
"source": [
"from jax import value_and_grad\n",
"\n",
- "parameter_scales = petab_problem.parameter_df.loc[petab_problem.x_free_ids, petab.PARAMETER_SCALE].values\n",
+ "parameter_scales = petab_problem.parameter_df.loc[\n",
+ " petab_problem.x_free_ids, petab.PARAMETER_SCALE\n",
+ "].values\n",
+ "\n",
"\n",
"@jax.jit\n",
"@value_and_grad\n",
"def jax_objective_with_parameter_transform(parameters: jnp.array):\n",
- " par_scaled = jnp.asarray(tuple(\n",
- " value if scale == petab.LIN\n",
- " else jnp.log(value) if scale == petab.LOG\n",
- " else jnp.log10(value)\n",
- " for value, scale in zip(parameters, parameter_scales)\n",
- " ))\n",
- " return jax_objective(par_scaled)\n",
- " "
+ " par_scaled = jnp.asarray(\n",
+ " tuple(\n",
+ " value\n",
+ " if scale == petab.LIN\n",
+ " else jnp.log(value)\n",
+ " if scale == petab.LOG\n",
+ " else jnp.log10(value)\n",
+ " for value, scale in zip(parameters, parameter_scales)\n",
+ " )\n",
+ " )\n",
+ " return jax_objective(par_scaled)"
]
},
{
@@ -755,7 +771,9 @@
"metadata": {},
"outputs": [],
"source": [
- "llh_jax, sllh_jax = jax_objective_with_parameter_transform(petab_problem.x_nominal_free)"
+ "llh_jax, sllh_jax = jax_objective_with_parameter_transform(\n",
+ " petab_problem.x_nominal_free\n",
+ ")"
]
},
{
@@ -777,7 +795,9 @@
"# TODO remove me as soon as sllh in simulate_petab is fixed\n",
"sllh = {\n",
" name: value / (np.log(10) * par_value)\n",
- " for (name, value), par_value in zip(r['sllh'].items(), petab_problem.x_nominal_free)\n",
+ " for (name, value), par_value in zip(\n",
+ " r[\"sllh\"].items(), petab_problem.x_nominal_free\n",
+ " )\n",
"}"
]
},
@@ -802,7 +822,8 @@
],
"source": [
"import pandas as pd\n",
- "pd.Series(dict(amici=r['llh'], jax=float(llh_jax)))"
+ "\n",
+ "pd.Series(dict(amici=r[\"llh\"], jax=float(llh_jax)))"
]
},
{
@@ -905,7 +926,9 @@
}
],
"source": [
- "pd.DataFrame(index=sllh.keys(), data=dict(amici=sllh.values(), jax=np.asarray(sllh_jax)))"
+ "pd.DataFrame(\n",
+ " index=sllh.keys(), data=dict(amici=sllh.values(), jax=np.asarray(sllh_jax))\n",
+ ")"
]
},
{
@@ -925,7 +948,9 @@
"outputs": [],
"source": [
"jax.config.update(\"jax_enable_x64\", True)\n",
- "llh_jax, sllh_jax = jax_objective_with_parameter_transform(petab_problem.x_nominal_free)"
+ "llh_jax, sllh_jax = jax_objective_with_parameter_transform(\n",
+ " petab_problem.x_nominal_free\n",
+ ")"
]
},
{
@@ -956,7 +981,7 @@
}
],
"source": [
- "pd.Series(dict(amici=r['llh'], jax=float(llh_jax)))"
+ "pd.Series(dict(amici=r[\"llh\"], jax=float(llh_jax)))"
]
},
{
@@ -1059,7 +1084,9 @@
}
],
"source": [
- "pd.DataFrame(index=sllh.keys(), data=dict(amici=sllh.values(), jax=np.asarray(sllh_jax)))"
+ "pd.DataFrame(\n",
+ " index=sllh.keys(), data=dict(amici=sllh.values(), jax=np.asarray(sllh_jax))\n",
+ ")"
]
}
],
diff --git a/documentation/GettingStarted.ipynb b/documentation/GettingStarted.ipynb
index 33258424b9..91fb9cb12c 100644
--- a/documentation/GettingStarted.ipynb
+++ b/documentation/GettingStarted.ipynb
@@ -26,7 +26,8 @@
"outputs": [],
"source": [
"import amici\n",
- "sbml_importer = amici.SbmlImporter('model_steadystate_scaled.xml')"
+ "\n",
+ "sbml_importer = amici.SbmlImporter(\"model_steadystate_scaled.xml\")"
]
},
{
@@ -42,8 +43,8 @@
"metadata": {},
"outputs": [],
"source": [
- "model_name = 'model_steadystate'\n",
- "model_dir = 'model_dir'\n",
+ "model_name = \"model_steadystate\"\n",
+ "model_dir = \"model_dir\"\n",
"sbml_importer.sbml2amici(model_name, model_dir)"
]
},
@@ -82,7 +83,7 @@
"metadata": {},
"outputs": [],
"source": [
- "model.setParameterByName('p1',1e-3)"
+ "model.setParameterByName(\"p1\", 1e-3)"
]
},
{
@@ -122,7 +123,7 @@
"outputs": [],
"source": [
"# set timepoints\n",
- "model.setTimepoints([0,1])\n",
+ "model.setTimepoints([0, 1])\n",
"rdata = amici.runAmiciSimulation(model, solver)"
]
},
diff --git a/documentation/amici_refs.bib b/documentation/amici_refs.bib
index 8730883075..4c31869d87 100644
--- a/documentation/amici_refs.bib
+++ b/documentation/amici_refs.bib
@@ -1011,19 +1011,6 @@ @Article{MassonisVil2022
url = {https://doi.org/10.1093/bioinformatics/btac755},
}
-@Article{RaimundezFed2022,
- author = {Raimundez, Elba and Fedders, Michael and Hasenauer, Jan},
- journal = {bioRxiv},
- title = {Posterior marginalization accelerates Bayesian inference for dynamical systems},
- year = {2022},
- abstract = {Bayesian inference is an important method in the life and natural sciences for learning from data. It provides information about parameter uncertainties, and thereby the reliability of models and their predictions. Yet, generating representative samples from the Bayesian posterior distribution is often computationally challenging. Here, we present an approach that lowers the computational complexity of sample generation for problems with scaling, offset and noise parameters. The proposed method is based on the marginalization of the posterior distribution, which reduces the dimensionality of the sampling problem. We provide analytical results for a broad class of problems and show that the method is suitable for a large number of applications. Subsequently, we demonstrate the benefit of the approach for various application examples from the field of systems biology. We report a substantial improvement up to 50 times in the effective sample size per unit of time, in particular when applied to multi-modal posterior problems. As the scheme is broadly applicable, it will facilitate Bayesian inference in different research fields.Competing Interest StatementThe authors have declared no competing interest.},
- doi = {10.1101/2022.12.02.518841},
- elocation-id = {2022.12.02.518841},
- eprint = {https://www.biorxiv.org/content/early/2022/12/03/2022.12.02.518841.full.pdf},
- publisher = {Cold Spring Harbor Laboratory},
- url = {https://www.biorxiv.org/content/early/2022/12/03/2022.12.02.518841},
-}
-
@Article{AlbadryHoe2022,
author = {Albadry, Mohamed and Höpfl, Sebastian and Ehteshamzad, Nadia and König, Matthias and Böttcher, Michael and Neumann, Jasna and Lupp, Amelie and Dirsch, Olaf and Radde, Nicole and Christ, Bruno and Christ, Madlen and Schwen, Lars Ole and Laue, Hendrik and Klopfleisch, Robert and Dahmen, Uta},
journal = {Scientific Reports},
@@ -1217,6 +1204,53 @@ @Article{TunedalVio2023
url = {https://physoc.onlinelibrary.wiley.com/doi/abs/10.1113/JP284652},
}
+@Unknown{HasenauerMer2023,
+ author = {Hasenauer, Jan and Merkt, Simon and Ali, Solomon and Gudina, Esayas and Adissu, Wondimagegn and Münchhoff, Maximilian and Graf, Alexander and Krebs, Stefan and Elsbernd, Kira and Kisch, Rebecca and Sirgu, Sisay and Fantahun, Bereket and Bekele, Delayehu and Rubio-Acero, Raquel and Gashaw, Mulatu and Girma, Eyob and Yilma, Daniel and Zeynudin, Ahmed and Paunovic, Ivana and Wieser, Andreas},
+ creationdate = {2023-09-19T09:21:01},
+ doi = {10.21203/rs.3.rs-3307821/v1},
+ modificationdate = {2023-09-19T09:21:01},
+ month = {09},
+ title = {Long-term monitoring of SARS-CoV-2 seroprevalence and variants in Ethiopia provides prediction for immunity and cross-immunity},
+ year = {2023},
+}
+
+@Article{RaimundezFed2023,
+ author = {Elba Raim{\'{u}}ndez and Michael Fedders and Jan Hasenauer},
+ journal = {{iScience}},
+ title = {Posterior marginalization accelerates Bayesian inference for dynamical models of biological processes},
+ year = {2023},
+ month = {sep},
+ pages = {108083},
+ creationdate = {2023-10-04T14:12:00},
+ doi = {10.1016/j.isci.2023.108083},
+ modificationdate = {2023-10-04T14:12:00},
+ publisher = {Elsevier {BV}},
+}
+
+@Article{Mendes2023,
+ author = {Mendes, Pedro},
+ journal = {Frontiers in Cell and Developmental Biology},
+ title = {Reproducibility and FAIR principles: the case of a segment polarity network model},
+ year = {2023},
+ issn = {2296-634X},
+ volume = {11},
+ abstract = {The issue of reproducibility of computational models and the related FAIR principles (findable, accessible, interoperable, and reusable) are examined in a specific test case. I analyze a computational model of the segment polarity network in Drosophila embryos published in 2000. Despite the high number of citations to this publication, 23 years later the model is barely accessible, and consequently not interoperable. Following the text of the original publication allowed successfully encoding the model for the open source software COPASI. Subsequently saving the model in the SBML format allowed it to be reused in other open source software packages. Submission of this SBML encoding of the model to the BioModels database enables its findability and accessibility. This demonstrates how the FAIR principles can be successfully enabled by using open source software, widely adopted standards, and public repositories, facilitating reproducibility and reuse of computational cell biology models that will outlive the specific software used.},
+ creationdate = {2023-10-28T19:05:54},
+ doi = {10.3389/fcell.2023.1201673},
+ modificationdate = {2023-10-28T19:05:54},
+ url = {https://www.frontiersin.org/articles/10.3389/fcell.2023.1201673},
+}
+
+@Misc{HuckBal2023,
+ author = {Wilhelm Huck and Mathieu Baltussen and Thijs de Jong and Quentin Duez and William Robinson},
+ title = {Chemical reservoir computation in a self-organizing reaction network},
+ year = {2023},
+ creationdate = {2023-11-18T09:09:45},
+ doi = {10.21203/rs.3.rs-3487081/v1},
+ modificationdate = {2023-11-18T09:10:08},
+ publisher = {Research Square Platform LLC},
+}
+
@Comment{jabref-meta: databaseType:bibtex;}
@Comment{jabref-meta: grouping:
diff --git a/documentation/conf.py b/documentation/conf.py
index 96209e4c31..ba88b25a8d 100644
--- a/documentation/conf.py
+++ b/documentation/conf.py
@@ -52,7 +52,9 @@ def my_exhale_generate_doxygen(doxygen_input):
DomainDirectiveFactory as breathe_DomainDirectiveFactory,
)
-old_breathe_DomainDirectiveFactory_create = breathe_DomainDirectiveFactory.create
+old_breathe_DomainDirectiveFactory_create = (
+ breathe_DomainDirectiveFactory.create
+)
def my_breathe_DomainDirectiveFactory_create(domain: str, args):
@@ -67,7 +69,9 @@ def my_breathe_DomainDirectiveFactory_create(domain: str, args):
return cls(domain + ":" + name, *args[1:])
-breathe_DomainDirectiveFactory.create = my_breathe_DomainDirectiveFactory_create
+breathe_DomainDirectiveFactory.create = (
+ my_breathe_DomainDirectiveFactory_create
+)
# END Monkeypatch breathe
@@ -102,7 +106,9 @@ def install_doxygen():
subprocess.run(cmd, shell=True, check=True)
assert os.path.islink(os.path.join(some_dir_on_path, "doxygen"))
# verify it's available
- res = subprocess.run(["doxygen", "--version"], check=False, capture_output=True)
+ res = subprocess.run(
+ ["doxygen", "--version"], check=False, capture_output=True
+ )
print(res.stdout.decode(), res.stderr.decode())
assert version in res.stdout.decode()
@@ -176,7 +182,10 @@ def install_doxygen():
intersphinx_mapping = {
"pysb": ("https://pysb.readthedocs.io/en/stable/", None),
- "petab": ("https://petab.readthedocs.io/projects/libpetab-python/en/latest/", None),
+ "petab": (
+ "https://petab.readthedocs.io/projects/libpetab-python/en/latest/",
+ None,
+ ),
"pandas": ("https://pandas.pydata.org/docs/", None),
"numpy": ("https://numpy.org/devdocs/", None),
"sympy": ("https://docs.sympy.org/latest/", None),
@@ -291,7 +300,9 @@ def install_doxygen():
"verboseBuild": True,
}
-mtocpp_filter = os.path.join(amici_dir, "matlab", "mtoc", "config", "mtocpp_filter.sh")
+mtocpp_filter = os.path.join(
+ amici_dir, "matlab", "mtoc", "config", "mtocpp_filter.sh"
+)
exhale_projects_args = {
"AMICI_CPP": {
"exhaleDoxygenStdin": "\n".join(
@@ -504,10 +515,14 @@ def process_docstring(app, what, name, obj, options, lines):
for old, new in typemaps.items():
lines[i] = lines[i].replace(old, new)
lines[i] = re.sub(
- r"amici::(Model|Solver|ExpData) ", r":class:`amici\.amici\.\1\`", lines[i]
+ r"amici::(Model|Solver|ExpData) ",
+ r":class:`amici\.amici\.\1\`",
+ lines[i],
)
lines[i] = re.sub(
- r"amici::(runAmiciSimulation[s]?)", r":func:`amici\.amici\.\1`", lines[i]
+ r"amici::(runAmiciSimulation[s]?)",
+ r":func:`amici\.amici\.\1`",
+ lines[i],
)
diff --git a/documentation/python_installation.rst b/documentation/python_installation.rst
index a0fcf0908b..6cc3402e61 100644
--- a/documentation/python_installation.rst
+++ b/documentation/python_installation.rst
@@ -13,7 +13,7 @@ Installation of the AMICI Python package has the following prerequisites:
* CBLAS compatible BLAS library
(e.g., OpenBLAS, CBLAS, Atlas, Accelerate, Intel MKL)
* a C++17 compatible C++ compiler and a C compiler
- (e.g., g++, clang, Intel C++ compiler, mingw)
+ (e.g., g++>=9.1, clang>=12, Intel C++ compiler, mingw)
If these requirements are fulfilled and all relevant paths are setup properly,
AMICI can be installed using:
@@ -44,6 +44,9 @@ Install the AMICI dependencies via ``apt``
# optionally for HDF5 support:
sudo apt install libhdf5-serial-dev
+ # optionally for boost support (thread-specific CPU times, extended math functions, serialization):
+ sudo apt install libboost-chrono-dev libboost-math-dev libboost-serialization-dev
+
Install AMICI:
.. code-block:: bash
@@ -88,13 +91,13 @@ Alternatively:
.. code-block:: bash
- sudo pacman -Si python swig openblas gcc hdf5 boost-libs
+ sudo pacman -Si python swig openblas gcc hdf5 boost-libs
2. Upgrade installed packages if required minimum versions are not satisfied for AMICI installation.
.. code-block:: bash
- sudo pacman -Su python swig openblas gcc hdf5 boost-libs
+ sudo pacman -Su python swig openblas gcc hdf5 boost-libs
3. Install AMICI:
@@ -117,6 +120,13 @@ Install the AMICI dependencies using homebrew:
# optionally for parallel simulations:
brew install libomp
+ # followed by either `brew link libomp` once,
+ # or `export OpenMP_ROOT=$(brew --prefix)/opt/libomp`, where `OpenMP_ROOT` has to be set again for every re-installation of AMICI and any new model import
+
+ # optionally for boost support (thread-specific CPU times, extended math functions, serialization)
+ brew install boost && export BOOST_ROOT=$(brew --prefix)/opt/boost
+ # followed by either `brew link boost` once,
+ # or `export BOOST_ROOT=$(brew --prefix)/opt/boost`, where `BOOST_ROOT` has to be set again for every re-installation of AMICI and any new model import
Install AMICI:
diff --git a/documentation/recreate_reference_list.py b/documentation/recreate_reference_list.py
index 1dd1c13b4b..034c884c4b 100755
--- a/documentation/recreate_reference_list.py
+++ b/documentation/recreate_reference_list.py
@@ -42,7 +42,10 @@ def get_sub_bibliography(year, by_year, bibfile):
entries = ",".join(["@" + x for x in by_year[year]])
stdin_input = (
- "---\n" f"bibliography: {bibfile}\n" f'nocite: "{entries}"\n...\n' f"# {year}"
+ "---\n"
+ f"bibliography: {bibfile}\n"
+ f'nocite: "{entries}"\n...\n'
+ f"# {year}"
)
out = subprocess.run(
@@ -67,7 +70,8 @@ def main():
with open(outfile, "w") as f:
f.write("# References\n\n")
f.write(
- "List of publications using AMICI. " f"Total number is {num_total}.\n\n"
+ "List of publications using AMICI. "
+ f"Total number is {num_total}.\n\n"
)
f.write(
"If you applied AMICI in your work and your publication is "
diff --git a/documentation/references.md b/documentation/references.md
index 3f5833e26f..00c3f40cc8 100644
--- a/documentation/references.md
+++ b/documentation/references.md
@@ -1,6 +1,6 @@
# References
-List of publications using AMICI. Total number is 79.
+List of publications using AMICI. Total number is 82.
If you applied AMICI in your work and your publication is missing, please let us know via a new GitHub issue.
@@ -51,6 +51,20 @@ Rewiring Contribute to Drug Resistance.” Molecular Systems
Biology 19 (2): e10988. https://doi.org/10.15252/msb.202210988.
+
+Hasenauer, Jan, Simon Merkt, Solomon Ali, Esayas Gudina, Wondimagegn
+Adissu, Maximilian Münchhoff, Alexander Graf, et al. 2023.
+“Long-Term Monitoring of SARS-CoV-2 Seroprevalence and Variants in
+Ethiopia Provides Prediction for Immunity and Cross-Immunity.”
+https://doi.org/10.21203/rs.3.rs-3307821/v1.
+
+
+Huck, Wilhelm, Mathieu Baltussen, Thijs de Jong, Quentin Duez, and
+William Robinson. 2023. “Chemical Reservoir Computation in a
+Self-Organizing Reaction Network.” Research Square Platform LLC.
+https://doi.org/10.21203/rs.3.rs-3487081/v1.
+
Lakrisenko, Polina, Paul Stapor, Stephan Grein, Łukasz Paszkowski, Dilan
@@ -60,6 +74,12 @@ at Steady-State in ODE Models of Biochemical Reaction Networks.”
PLOS Computational Biology 19 (1): 1–19.
https://doi.org/10.1371/journal.pcbi.1010783.
+
+Mendes, Pedro. 2023. “Reproducibility and FAIR Principles: The
+Case of a Segment Polarity Network Model.” Frontiers in Cell
+and Developmental Biology 11.
+https://doi.org/10.3389/fcell.2023.1201673.
+
Mishra, Shekhar, Ziyu Wang, Michael J. Volk, and Huimin Zhao. 2023.
“Design and Application of a Kinetic Model of Lipid Metabolism in
@@ -67,6 +87,13 @@ Saccharomyces Cerevisiae.” Metabolic Engineering 75:
12–18.
https://doi.org/10.1016/j.ymben.2022.11.003.
+
+Raimúndez, Elba, Michael Fedders, and Jan Hasenauer. 2023.
+“Posterior Marginalization Accelerates Bayesian Inference for
+Dynamical Models of Biological Processes.”
+iScience, September, 108083.
+https://doi.org/10.1016/j.isci.2023.108083.
+
Sluijs, Bob van, Tao Zhou, Britta Helwig, Mathieu Baltussen, Frank
Nelissen, Hans Heus, and Wilhelm Huck. 2023.
“Inverse Design of
@@ -124,12 +151,6 @@ with Forward Sensitivity Analysis.” Journal of Chromatography
A, 463741.
https://doi.org/10.1016/j.chroma.2022.463741.
-
-Raimundez, Elba, Michael Fedders, and Jan Hasenauer. 2022.
-“Posterior Marginalization Accelerates Bayesian Inference for
-Dynamical Systems.” bioRxiv.
-https://doi.org/10.1101/2022.12.02.518841.
-
Schmucker, Robin, Gabriele Farina, James Faeder, Fabian Fröhlich, Ali
Sinan Saglam, and Tuomas Sandholm. 2022.
“Combination Treatment
diff --git a/include/amici/steadystateproblem.h b/include/amici/steadystateproblem.h
index b3af55c20a..dc19c014c4 100644
--- a/include/amici/steadystateproblem.h
+++ b/include/amici/steadystateproblem.h
@@ -208,8 +208,15 @@ class SteadystateProblem {
/**
* @brief Stores state and throws an exception if equilibration failed
- */
- [[noreturn]] void handleSteadyStateFailure();
+ * @param tried_newton_1 Whether any Newton step was attempted before
+ * simulation
+ * @param tried_simulation Whether simulation was attempted
+ * @param tried_newton_2 Whether any Newton step was attempted after
+ * simulation
+ */
+ [[noreturn]] void handleSteadyStateFailure(
+ bool tried_newton_1, bool tried_simulation, bool tried_newton_2
+ );
/**
* @brief Assembles the error message to be thrown.
diff --git a/python/benchmark/benchmark_pysb.py b/python/benchmark/benchmark_pysb.py
index e3b505300e..079041ed4d 100644
--- a/python/benchmark/benchmark_pysb.py
+++ b/python/benchmark/benchmark_pysb.py
@@ -33,7 +33,9 @@
with amici.add_path(os.path.dirname(pysb.examples.__file__)):
with amici.add_path(
- os.path.join(os.path.dirname(__file__), "..", "tests", "pysb_test_models")
+ os.path.join(
+ os.path.dirname(__file__), "..", "tests", "pysb_test_models"
+ )
):
pysb.SelfExporter.cleanup() # reset pysb
pysb.SelfExporter.do_export = True
@@ -52,9 +54,9 @@
integrator_options={"rtol": rtol, "atol": atol},
)
time_pysb = (
- timeit.Timer("pysb_simres = sim.run()", globals={"sim": sim}).timeit(
- number=N_REPEATS
- )
+ timeit.Timer(
+ "pysb_simres = sim.run()", globals={"sim": sim}
+ ).timeit(number=N_REPEATS)
/ N_REPEATS
)
@@ -76,7 +78,9 @@
observables=list(pysb_model.observables.keys()),
)
- amici_model_module = amici.import_model_module(pysb_model.name, outdir)
+ amici_model_module = amici.import_model_module(
+ pysb_model.name, outdir
+ )
model_pysb = amici_model_module.getModel()
@@ -89,7 +93,11 @@
time_amici = (
timeit.Timer(
"rdata = amici.runAmiciSimulation(model, solver)",
- globals={"model": model_pysb, "solver": solver, "amici": amici},
+ globals={
+ "model": model_pysb,
+ "solver": solver,
+ "amici": amici,
+ },
).timeit(number=N_REPEATS)
/ N_REPEATS
)
diff --git a/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb b/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb
index 7c8ceec6cd..5f66ea4db9 100644
--- a/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb
+++ b/python/examples/example_constant_species/ExampleEquilibrationLogic.ipynb
@@ -60,7 +60,10 @@
],
"source": [
"from IPython.display import Image\n",
- "fig = Image(filename=('../../../documentation/gfx/steadystate_solver_workflow.png'))\n",
+ "\n",
+ "fig = Image(\n",
+ " filename=(\"../../../documentation/gfx/steadystate_solver_workflow.png\")\n",
+ ")\n",
"fig"
]
},
@@ -102,11 +105,11 @@
"import matplotlib.pyplot as plt\n",
"\n",
"# SBML model we want to import\n",
- "sbml_file = 'model_constant_species.xml'\n",
+ "sbml_file = \"model_constant_species.xml\"\n",
"\n",
"# Name of the models that will also be the name of the python module\n",
- "model_name = 'model_constant_species'\n",
- "model_reduced_name = model_name + '_reduced'\n",
+ "model_name = \"model_constant_species\"\n",
+ "model_reduced_name = model_name + \"_reduced\"\n",
"\n",
"# Directories to which the generated model code is written\n",
"model_output_dir = model_name\n",
@@ -118,18 +121,41 @@
"sbml_model = sbml_doc.getModel()\n",
"dir(sbml_doc)\n",
"\n",
- "print('Species: ', [s.getId() for s in sbml_model.getListOfSpecies()])\n",
+ "print(\"Species: \", [s.getId() for s in sbml_model.getListOfSpecies()])\n",
"\n",
- "print('\\nReactions:')\n",
+ "print(\"\\nReactions:\")\n",
"for reaction in sbml_model.getListOfReactions():\n",
- " reactants = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfReactants()])\n",
- " products = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfProducts()])\n",
- " reversible = '<' if reaction.getReversible() else ''\n",
- " print('%3s: %10s %1s->%10s\\t\\t[%s]' % (reaction.getId(),\n",
- " reactants,\n",
- " reversible,\n",
- " products,\n",
- " libsbml.formulaToL3String(reaction.getKineticLaw().getMath())))"
+ " reactants = \" + \".join(\n",
+ " [\n",
+ " \"%s %s\"\n",
+ " % (\n",
+ " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n",
+ " r.getSpecies(),\n",
+ " )\n",
+ " for r in reaction.getListOfReactants()\n",
+ " ]\n",
+ " )\n",
+ " products = \" + \".join(\n",
+ " [\n",
+ " \"%s %s\"\n",
+ " % (\n",
+ " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n",
+ " r.getSpecies(),\n",
+ " )\n",
+ " for r in reaction.getListOfProducts()\n",
+ " ]\n",
+ " )\n",
+ " reversible = \"<\" if reaction.getReversible() else \"\"\n",
+ " print(\n",
+ " \"%3s: %10s %1s->%10s\\t\\t[%s]\"\n",
+ " % (\n",
+ " reaction.getId(),\n",
+ " reactants,\n",
+ " reversible,\n",
+ " products,\n",
+ " libsbml.formulaToL3String(reaction.getKineticLaw().getMath()),\n",
+ " )\n",
+ " )"
]
},
{
@@ -142,25 +168,29 @@
"sbml_importer = amici.SbmlImporter(sbml_file)\n",
"\n",
"# specify observables and constant parameters\n",
- "constantParameters = ['synthesis_substrate', 'init_enzyme']\n",
+ "constantParameters = [\"synthesis_substrate\", \"init_enzyme\"]\n",
"observables = {\n",
- " 'observable_product': {'name': '', 'formula': 'product'},\n",
- " 'observable_substrate': {'name': '', 'formula': 'substrate'},\n",
+ " \"observable_product\": {\"name\": \"\", \"formula\": \"product\"},\n",
+ " \"observable_substrate\": {\"name\": \"\", \"formula\": \"substrate\"},\n",
"}\n",
- "sigmas = {'observable_product': 1.0, 'observable_substrate': 1.0}\n",
+ "sigmas = {\"observable_product\": 1.0, \"observable_substrate\": 1.0}\n",
"\n",
"# import the model\n",
- "sbml_importer.sbml2amici(model_reduced_name,\n",
- " model_reduced_output_dir,\n",
- " observables=observables,\n",
- " constant_parameters=constantParameters,\n",
- " sigmas=sigmas)\n",
- "sbml_importer.sbml2amici(model_name,\n",
- " model_output_dir,\n",
- " observables=observables,\n",
- " constant_parameters=constantParameters,\n",
- " sigmas=sigmas,\n",
- " compute_conservation_laws=False)"
+ "sbml_importer.sbml2amici(\n",
+ " model_reduced_name,\n",
+ " model_reduced_output_dir,\n",
+ " observables=observables,\n",
+ " constant_parameters=constantParameters,\n",
+ " sigmas=sigmas,\n",
+ ")\n",
+ "sbml_importer.sbml2amici(\n",
+ " model_name,\n",
+ " model_output_dir,\n",
+ " observables=observables,\n",
+ " constant_parameters=constantParameters,\n",
+ " sigmas=sigmas,\n",
+ " compute_conservation_laws=False,\n",
+ ")"
]
},
{
@@ -219,10 +249,14 @@
],
"source": [
"# import the models and run some test simulations\n",
- "model_reduced_module = amici.import_model_module(model_reduced_name, os.path.abspath(model_reduced_output_dir))\n",
+ "model_reduced_module = amici.import_model_module(\n",
+ " model_reduced_name, os.path.abspath(model_reduced_output_dir)\n",
+ ")\n",
"model_reduced = model_reduced_module.getModel()\n",
"\n",
- "model_module = amici.import_model_module(model_name, os.path.abspath(model_output_dir))\n",
+ "model_module = amici.import_model_module(\n",
+ " model_name, os.path.abspath(model_output_dir)\n",
+ ")\n",
"model = model_module.getModel()\n",
"\n",
"\n",
@@ -238,6 +272,7 @@
"\n",
"# plot trajectories\n",
"import amici.plotting\n",
+ "\n",
"amici.plotting.plotStateTrajectories(rdata_reduced, model=model_reduced)\n",
"amici.plotting.plotObservableTrajectories(rdata_reduced, model=model_reduced)\n",
"\n",
@@ -336,9 +371,9 @@
"solver.setMaxSteps(1000)\n",
"rdata = amici.runAmiciSimulation(model, solver)\n",
"\n",
- "#np.set_printoptions(threshold=8, edgeitems=2)\n",
+ "# np.set_printoptions(threshold=8, edgeitems=2)\n",
"for key, value in rdata.items():\n",
- " print('%12s: ' % key, value)"
+ " print(\"%12s: \" % key, value)"
]
},
{
@@ -399,8 +434,10 @@
"# reduce maxsteps for integration\n",
"solver.setMaxSteps(100)\n",
"rdata = amici.runAmiciSimulation(model, solver)\n",
- "print('Status of postequilibration:', rdata['posteq_status'])\n",
- "print('Number of steps employed in postequilibration:', rdata['posteq_numsteps'])"
+ "print(\"Status of postequilibration:\", rdata[\"posteq_status\"])\n",
+ "print(\n",
+ " \"Number of steps employed in postequilibration:\", rdata[\"posteq_numsteps\"]\n",
+ ")"
]
},
{
@@ -437,8 +474,11 @@
"solver_reduced.setMaxSteps(100)\n",
"rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced)\n",
"\n",
- "print('Status of postequilibration:', rdata_reduced['posteq_status'])\n",
- "print('Number of steps employed in postequilibration:', rdata_reduced['posteq_numsteps'])"
+ "print(\"Status of postequilibration:\", rdata_reduced[\"posteq_status\"])\n",
+ "print(\n",
+ " \"Number of steps employed in postequilibration:\",\n",
+ " rdata_reduced[\"posteq_numsteps\"],\n",
+ ")"
]
},
{
@@ -498,7 +538,9 @@
"source": [
"# Call simulation with singular Jacobian and integrateIfNewtonFails mode\n",
"model.setTimepoints(np.full(1, np.inf))\n",
- "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.integrateIfNewtonFails)\n",
+ "model.setSteadyStateSensitivityMode(\n",
+ " amici.SteadyStateSensitivityMode.integrateIfNewtonFails\n",
+ ")\n",
"solver = model.getSolver()\n",
"solver.setNewtonMaxSteps(10)\n",
"solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n",
@@ -506,10 +548,12 @@
"solver.setMaxSteps(10000)\n",
"rdata = amici.runAmiciSimulation(model, solver)\n",
"\n",
- "print('Status of postequilibration:', rdata['posteq_status'])\n",
- "print('Number of steps employed in postequilibration:', rdata['posteq_numsteps'])\n",
- "print('Computed state sensitivities:')\n",
- "print(rdata['sx'][0,:,:])"
+ "print(\"Status of postequilibration:\", rdata[\"posteq_status\"])\n",
+ "print(\n",
+ " \"Number of steps employed in postequilibration:\", rdata[\"posteq_numsteps\"]\n",
+ ")\n",
+ "print(\"Computed state sensitivities:\")\n",
+ "print(rdata[\"sx\"][0, :, :])"
]
},
{
@@ -550,17 +594,21 @@
"source": [
"# Call simulation with singular Jacobian and newtonOnly mode (will fail)\n",
"model.setTimepoints(np.full(1, np.inf))\n",
- "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n",
+ "model.setSteadyStateSensitivityMode(\n",
+ " amici.SteadyStateSensitivityMode.newtonOnly\n",
+ ")\n",
"solver = model.getSolver()\n",
"solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n",
"solver.setSensitivityOrder(amici.SensitivityOrder.first)\n",
"solver.setMaxSteps(10000)\n",
"rdata = amici.runAmiciSimulation(model, solver)\n",
"\n",
- "print('Status of postequilibration:', rdata['posteq_status'])\n",
- "print('Number of steps employed in postequilibration:', rdata['posteq_numsteps'])\n",
- "print('Computed state sensitivities:')\n",
- "print(rdata['sx'][0,:,:])"
+ "print(\"Status of postequilibration:\", rdata[\"posteq_status\"])\n",
+ "print(\n",
+ " \"Number of steps employed in postequilibration:\", rdata[\"posteq_numsteps\"]\n",
+ ")\n",
+ "print(\"Computed state sensitivities:\")\n",
+ "print(rdata[\"sx\"][0, :, :])"
]
},
{
@@ -586,7 +634,9 @@
"source": [
"# Call postequilibration by setting an infinity timepoint\n",
"model_reduced.setTimepoints(np.full(1, np.inf))\n",
- "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n",
+ "model.setSteadyStateSensitivityMode(\n",
+ " amici.SteadyStateSensitivityMode.newtonOnly\n",
+ ")\n",
"solver_reduced = model_reduced.getSolver()\n",
"solver_reduced.setNewtonMaxSteps(10)\n",
"solver_reduced.setSensitivityMethod(amici.SensitivityMethod.forward)\n",
@@ -594,10 +644,13 @@
"solver_reduced.setMaxSteps(1000)\n",
"rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced)\n",
"\n",
- "print('Status of postequilibration:', rdata_reduced['posteq_status'])\n",
- "print('Number of steps employed in postequilibration:', rdata_reduced['posteq_numsteps'])\n",
- "print('Computed state sensitivities:')\n",
- "print(rdata_reduced['sx'][0,:,:])"
+ "print(\"Status of postequilibration:\", rdata_reduced[\"posteq_status\"])\n",
+ "print(\n",
+ " \"Number of steps employed in postequilibration:\",\n",
+ " rdata_reduced[\"posteq_numsteps\"],\n",
+ ")\n",
+ "print(\"Computed state sensitivities:\")\n",
+ "print(rdata_reduced[\"sx\"][0, :, :])"
]
},
{
@@ -646,11 +699,13 @@
"source": [
"# Call adjoint postequilibration by setting an infinity timepoint\n",
"# and create an edata object, which is needed for adjoint computation\n",
- "edata = amici.ExpData(2, 0, 0, np.array([float('inf')]))\n",
+ "edata = amici.ExpData(2, 0, 0, np.array([float(\"inf\")]))\n",
"edata.setObservedData([1.8] * 2)\n",
- "edata.fixedParameters = np.array([3., 5.])\n",
+ "edata.fixedParameters = np.array([3.0, 5.0])\n",
"\n",
- "model_reduced.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n",
+ "model_reduced.setSteadyStateSensitivityMode(\n",
+ " amici.SteadyStateSensitivityMode.newtonOnly\n",
+ ")\n",
"solver_reduced = model_reduced.getSolver()\n",
"solver_reduced.setNewtonMaxSteps(10)\n",
"solver_reduced.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n",
@@ -658,10 +713,16 @@
"solver_reduced.setMaxSteps(1000)\n",
"rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n",
"\n",
- "print('Status of postequilibration:', rdata_reduced['posteq_status'])\n",
- "print('Number of steps employed in postequilibration:', rdata_reduced['posteq_numsteps'])\n",
- "print('Number of backward steps employed in postequilibration:', rdata_reduced['posteq_numstepsB'])\n",
- "print('Computed gradient:', rdata_reduced['sllh'])"
+ "print(\"Status of postequilibration:\", rdata_reduced[\"posteq_status\"])\n",
+ "print(\n",
+ " \"Number of steps employed in postequilibration:\",\n",
+ " rdata_reduced[\"posteq_numsteps\"],\n",
+ ")\n",
+ "print(\n",
+ " \"Number of backward steps employed in postequilibration:\",\n",
+ " rdata_reduced[\"posteq_numstepsB\"],\n",
+ ")\n",
+ "print(\"Computed gradient:\", rdata_reduced[\"sllh\"])"
]
},
{
@@ -691,17 +752,24 @@
],
"source": [
"# Call adjoint postequilibration with model with singular Jacobian\n",
- "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n",
+ "model.setSteadyStateSensitivityMode(\n",
+ " amici.SteadyStateSensitivityMode.newtonOnly\n",
+ ")\n",
"solver = model.getSolver()\n",
"solver.setNewtonMaxSteps(10)\n",
"solver.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n",
"solver.setSensitivityOrder(amici.SensitivityOrder.first)\n",
"rdata = amici.runAmiciSimulation(model, solver, edata)\n",
"\n",
- "print('Status of postequilibration:', rdata['posteq_status'])\n",
- "print('Number of steps employed in postequilibration:', rdata['posteq_numsteps'])\n",
- "print('Number of backward steps employed in postequilibration:', rdata['posteq_numstepsB'])\n",
- "print('Computed gradient:', rdata['sllh'])"
+ "print(\"Status of postequilibration:\", rdata[\"posteq_status\"])\n",
+ "print(\n",
+ " \"Number of steps employed in postequilibration:\", rdata[\"posteq_numsteps\"]\n",
+ ")\n",
+ "print(\n",
+ " \"Number of backward steps employed in postequilibration:\",\n",
+ " rdata[\"posteq_numstepsB\"],\n",
+ ")\n",
+ "print(\"Computed gradient:\", rdata[\"sllh\"])"
]
},
{
@@ -720,11 +788,10 @@
"outputs": [],
"source": [
"# create edata, with 3 timepoints and 2 observables:\n",
- "edata = amici.ExpData(2, 0, 0,\n",
- " np.array([0., 0.1, 1.]))\n",
+ "edata = amici.ExpData(2, 0, 0, np.array([0.0, 0.1, 1.0]))\n",
"edata.setObservedData([1.8] * 6)\n",
- "edata.fixedParameters = np.array([3., 5.])\n",
- "edata.fixedParametersPreequilibration = np.array([0., 2.])\n",
+ "edata.fixedParameters = np.array([3.0, 5.0])\n",
+ "edata.fixedParametersPreequilibration = np.array([0.0, 2.0])\n",
"edata.reinitializeFixedParameterInitialStates = True"
]
},
@@ -764,8 +831,8 @@
"solver_reduced.setNewtonMaxSteps(10)\n",
"rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n",
"\n",
- "amici.plotting.plotStateTrajectories(rdata_reduced, model = model_reduced)\n",
- "amici.plotting.plotObservableTrajectories(rdata_reduced, model = model_reduced)"
+ "amici.plotting.plotStateTrajectories(rdata_reduced, model=model_reduced)\n",
+ "amici.plotting.plotObservableTrajectories(rdata_reduced, model=model_reduced)"
]
},
{
@@ -782,7 +849,7 @@
"outputs": [],
"source": [
"# Change the last timepoint to an infinity timepoint.\n",
- "edata.setTimepoints(np.array([0., 0.1, float('inf')]))\n",
+ "edata.setTimepoints(np.array([0.0, 0.1, float(\"inf\")]))\n",
"\n",
"# run the simulation\n",
"rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)"
@@ -844,10 +911,12 @@
],
"source": [
"# No postquilibration this time.\n",
- "edata.setTimepoints(np.array([0., 0.1, 1.]))\n",
+ "edata.setTimepoints(np.array([0.0, 0.1, 1.0]))\n",
"\n",
"# create the solver object and run the simulation, singular Jacobian, enforce Newton solver for sensitivities\n",
- "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n",
+ "model.setSteadyStateSensitivityMode(\n",
+ " amici.SteadyStateSensitivityMode.newtonOnly\n",
+ ")\n",
"solver = model.getSolver()\n",
"solver.setNewtonMaxSteps(10)\n",
"solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n",
@@ -855,8 +924,8 @@
"rdata = amici.runAmiciSimulation(model, solver, edata)\n",
"\n",
"for key, value in rdata.items():\n",
- " if key[0:6] == 'preeq_':\n",
- " print('%20s: ' % key, value)"
+ " if key[0:6] == \"preeq_\":\n",
+ " print(\"%20s: \" % key, value)"
]
},
{
@@ -883,7 +952,9 @@
],
"source": [
"# Singluar Jacobian, use simulation\n",
- "model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.integrateIfNewtonFails)\n",
+ "model.setSteadyStateSensitivityMode(\n",
+ " amici.SteadyStateSensitivityMode.integrateIfNewtonFails\n",
+ ")\n",
"solver = model.getSolver()\n",
"solver.setNewtonMaxSteps(10)\n",
"solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n",
@@ -891,8 +962,8 @@
"rdata = amici.runAmiciSimulation(model, solver, edata)\n",
"\n",
"for key, value in rdata.items():\n",
- " if key[0:6] == 'preeq_':\n",
- " print('%20s: ' % key, value)"
+ " if key[0:6] == \"preeq_\":\n",
+ " print(\"%20s: \" % key, value)"
]
},
{
@@ -924,8 +995,8 @@
"rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n",
"\n",
"for key, value in rdata_reduced.items():\n",
- " if key[0:6] == 'preeq_':\n",
- " print('%20s: ' % key, value)"
+ " if key[0:6] == \"preeq_\":\n",
+ " print(\"%20s: \" % key, value)"
]
},
{
@@ -975,9 +1046,9 @@
"rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n",
"\n",
"for key, value in rdata_reduced.items():\n",
- " if key[0:6] == 'preeq_':\n",
- " print('%20s: ' % key, value)\n",
- "print('Gradient:', rdata_reduced['sllh'])"
+ " if key[0:6] == \"preeq_\":\n",
+ " print(\"%20s: \" % key, value)\n",
+ "print(\"Gradient:\", rdata_reduced[\"sllh\"])"
]
},
{
@@ -1010,9 +1081,9 @@
"rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n",
"\n",
"for key, value in rdata_reduced.items():\n",
- " if key[0:6] == 'preeq_':\n",
- " print('%20s: ' % key, value)\n",
- "print('Gradient:', rdata_reduced['sllh'])"
+ " if key[0:6] == \"preeq_\":\n",
+ " print(\"%20s: \" % key, value)\n",
+ "print(\"Gradient:\", rdata_reduced[\"sllh\"])"
]
},
{
@@ -1041,14 +1112,16 @@
"solver_reduced = model_reduced.getSolver()\n",
"solver_reduced.setNewtonMaxSteps(10)\n",
"solver_reduced.setSensitivityMethod(amici.SensitivityMethod.adjoint)\n",
- "solver_reduced.setSensitivityMethodPreequilibration(amici.SensitivityMethod.adjoint)\n",
+ "solver_reduced.setSensitivityMethodPreequilibration(\n",
+ " amici.SensitivityMethod.adjoint\n",
+ ")\n",
"solver_reduced.setSensitivityOrder(amici.SensitivityOrder.first)\n",
"rdata_reduced = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n",
"\n",
"for key, value in rdata_reduced.items():\n",
- " if key[0:6] == 'preeq_':\n",
- " print('%20s: ' % key, value)\n",
- "print('Gradient:', rdata_reduced['sllh'])"
+ " if key[0:6] == \"preeq_\":\n",
+ " print(\"%20s: \" % key, value)\n",
+ "print(\"Gradient:\", rdata_reduced[\"sllh\"])"
]
},
{
@@ -1089,9 +1162,9 @@
"rdata = amici.runAmiciSimulation(model, solver, edata)\n",
"\n",
"for key, value in rdata.items():\n",
- " if key[0:6] == 'preeq_':\n",
- " print('%20s: ' % key, value)\n",
- "print('Gradient:', rdata['sllh'])"
+ " if key[0:6] == \"preeq_\":\n",
+ " print(\"%20s: \" % key, value)\n",
+ "print(\"Gradient:\", rdata[\"sllh\"])"
]
},
{
@@ -1135,7 +1208,9 @@
],
"source": [
"# Non-singular Jacobian, use simulaiton\n",
- "model_reduced.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.integrateIfNewtonFails)\n",
+ "model_reduced.setSteadyStateSensitivityMode(\n",
+ " amici.SteadyStateSensitivityMode.integrateIfNewtonFails\n",
+ ")\n",
"solver_reduced = model_reduced.getSolver()\n",
"solver_reduced.setNewtonMaxSteps(0)\n",
"solver_reduced.setSensitivityMethod(amici.SensitivityMethod.forward)\n",
@@ -1146,27 +1221,31 @@
"solver_reduced.setAbsoluteToleranceSteadyState(1e-3)\n",
"solver_reduced.setRelativeToleranceSteadyStateSensi(1e-2)\n",
"solver_reduced.setAbsoluteToleranceSteadyStateSensi(1e-3)\n",
- "rdata_reduced_lax = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n",
+ "rdata_reduced_lax = amici.runAmiciSimulation(\n",
+ " model_reduced, solver_reduced, edata\n",
+ ")\n",
"\n",
"# run with strict tolerances\n",
"solver_reduced.setRelativeToleranceSteadyState(1e-12)\n",
"solver_reduced.setAbsoluteToleranceSteadyState(1e-16)\n",
"solver_reduced.setRelativeToleranceSteadyStateSensi(1e-12)\n",
"solver_reduced.setAbsoluteToleranceSteadyStateSensi(1e-16)\n",
- "rdata_reduced_strict = amici.runAmiciSimulation(model_reduced, solver_reduced, edata)\n",
+ "rdata_reduced_strict = amici.runAmiciSimulation(\n",
+ " model_reduced, solver_reduced, edata\n",
+ ")\n",
"\n",
"# compare ODE outputs\n",
- "print('\\nODE solver steps, which were necessary to reach steady state:')\n",
- "print('lax tolerances: ', rdata_reduced_lax['preeq_numsteps'])\n",
- "print('strict tolerances: ', rdata_reduced_strict['preeq_numsteps'])\n",
+ "print(\"\\nODE solver steps, which were necessary to reach steady state:\")\n",
+ "print(\"lax tolerances: \", rdata_reduced_lax[\"preeq_numsteps\"])\n",
+ "print(\"strict tolerances: \", rdata_reduced_strict[\"preeq_numsteps\"])\n",
"\n",
- "print('\\nsimulation time corresponding to steady state:')\n",
- "print(rdata_reduced_lax['preeq_t'])\n",
- "print(rdata_reduced_strict['preeq_t'])\n",
+ "print(\"\\nsimulation time corresponding to steady state:\")\n",
+ "print(rdata_reduced_lax[\"preeq_t\"])\n",
+ "print(rdata_reduced_strict[\"preeq_t\"])\n",
"\n",
- "print('\\ncomputation time to reach steady state:')\n",
- "print(rdata_reduced_lax['preeq_cpu_time'])\n",
- "print(rdata_reduced_strict['preeq_cpu_time'])"
+ "print(\"\\ncomputation time to reach steady state:\")\n",
+ "print(rdata_reduced_lax[\"preeq_cpu_time\"])\n",
+ "print(rdata_reduced_strict[\"preeq_cpu_time\"])"
]
}
],
diff --git a/python/examples/example_errors.ipynb b/python/examples/example_errors.ipynb
index 988afb98a3..5e07803d96 100644
--- a/python/examples/example_errors.ipynb
+++ b/python/examples/example_errors.ipynb
@@ -75,7 +75,9 @@
"outputs": [],
"source": [
"petab_problem = benchmark_models_petab.get_problem(\"Fujita_SciSignal2010\")\n",
- "amici_model = import_petab_problem(petab_problem, verbose=False, force_compile=False)\n",
+ "amici_model = import_petab_problem(\n",
+ " petab_problem, verbose=False, force_compile=False\n",
+ ")\n",
"\n",
"np.random.seed(2991)\n",
"problem_parameters = dict(\n",
@@ -85,13 +87,25 @@
" )\n",
")\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
- " scaled_parameters=True\n",
+ " scaled_parameters=True,\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
- "assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_SUCCESS', 'AMICI_SUCCESS', 'AMICI_SUCCESS', 'AMICI_TOO_MUCH_WORK', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN']"
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
+ ")\n",
+ "assert [\n",
+ " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n",
+ "] == [\n",
+ " \"AMICI_SUCCESS\",\n",
+ " \"AMICI_SUCCESS\",\n",
+ " \"AMICI_SUCCESS\",\n",
+ " \"AMICI_TOO_MUCH_WORK\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ "]"
]
},
{
@@ -127,14 +141,17 @@
"amici_solver.setMaxSteps(10 * amici_solver.getMaxSteps())\n",
"\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
- " solver=amici_solver\n",
+ " solver=amici_solver,\n",
")\n",
"\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
+ ")\n",
"assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])\n",
"print(\"Simulations finished succesfully.\")\n",
"print()\n",
@@ -146,15 +163,18 @@
"amici_solver.setRelativeTolerance(50 * amici_solver.getRelativeTolerance())\n",
"\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
- " solver=amici_solver\n",
+ " solver=amici_solver,\n",
+ ")\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
"assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])\n",
- "print(\"Simulations finished succesfully.\")\n"
+ "print(\"Simulations finished succesfully.\")"
]
},
{
@@ -185,13 +205,18 @@
" )\n",
")\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
- " scaled_parameters=True\n",
+ " scaled_parameters=True,\n",
+ ")\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
- "assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_TOO_MUCH_WORK']"
+ "assert [\n",
+ " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n",
+ "] == [\"AMICI_TOO_MUCH_WORK\"]"
]
},
{
@@ -284,14 +309,26 @@
" )\n",
")\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
- " scaled_parameters=True\n",
+ " scaled_parameters=True,\n",
+ ")\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
"\n",
- "assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_SUCCESS', 'AMICI_ERR_FAILURE', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN']"
+ "assert [\n",
+ " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n",
+ "] == [\n",
+ " \"AMICI_SUCCESS\",\n",
+ " \"AMICI_ERR_FAILURE\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ "]"
]
},
{
@@ -352,13 +389,16 @@
"amici_solver.setRelativeTolerance(amici_solver.getRelativeTolerance() / 10)\n",
"\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
- " solver=amici_solver\n",
+ " solver=amici_solver,\n",
+ ")\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
"assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])\n",
"print(\"Simulations finished succesfully.\")"
]
@@ -381,7 +421,9 @@
"outputs": [],
"source": [
"petab_problem = benchmark_models_petab.get_problem(\"Weber_BMC2015\")\n",
- "amici_model = import_petab_problem(petab_problem, verbose=False, force_compile=False)\n",
+ "amici_model = import_petab_problem(\n",
+ " petab_problem, verbose=False, force_compile=False\n",
+ ")\n",
"\n",
"np.random.seed(4)\n",
"problem_parameters = dict(\n",
@@ -391,13 +433,21 @@
" )\n",
")\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
- " scaled_parameters=True\n",
+ " scaled_parameters=True,\n",
+ ")\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
- "assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_ERROR', 'AMICI_NOT_RUN']"
+ "assert [\n",
+ " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n",
+ "] == [\n",
+ " \"AMICI_ERROR\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ "]"
]
},
{
@@ -438,13 +488,16 @@
" )\n",
")\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
- " solver=amici_solver\n",
+ " solver=amici_solver,\n",
+ ")\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
"assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])"
]
},
@@ -476,13 +529,18 @@
" )\n",
")\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
- " scaled_parameters=True\n",
+ " scaled_parameters=True,\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
- "assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_FIRST_RHSFUNC_ERR']"
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
+ ")\n",
+ "assert [\n",
+ " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n",
+ "] == [\"AMICI_FIRST_RHSFUNC_ERR\"]"
]
},
{
@@ -571,11 +629,16 @@
"source": [
"# we have to account for the chosen parameter scale\n",
"from itertools import starmap\n",
- "unscaled_parameter = dict(zip(\n",
- " amici_model.getParameterIds(),\n",
- " starmap(amici.getUnscaledParameter, zip(edata.parameters, edata.pscale)),\n",
- "))\n",
- "print(dict((p, unscaled_parameter[p]) for p in ('Kd', 'Kp', 'n_par')))"
+ "\n",
+ "unscaled_parameter = dict(\n",
+ " zip(\n",
+ " amici_model.getParameterIds(),\n",
+ " starmap(\n",
+ " amici.getUnscaledParameter, zip(edata.parameters, edata.pscale)\n",
+ " ),\n",
+ " )\n",
+ ")\n",
+ "print(dict((p, unscaled_parameter[p]) for p in (\"Kd\", \"Kp\", \"n_par\")))"
]
},
{
@@ -594,7 +657,9 @@
"metadata": {},
"outputs": [],
"source": [
- "print(f\"{x0['Z_state']**unscaled_parameter['n_par'] + unscaled_parameter['Kd']**unscaled_parameter['n_par']=}\")"
+ "print(\n",
+ " f\"{x0['Z_state']**unscaled_parameter['n_par'] + unscaled_parameter['Kd']**unscaled_parameter['n_par']=}\"\n",
+ ")"
]
},
{
@@ -631,16 +696,18 @@
"with suppress(KeyError):\n",
" del os.environ[\"AMICI_EXPERIMENTAL_SBML_NONCONST_CLS\"]\n",
"amici_model = import_petab_problem(\n",
- " petab_problem, \n",
+ " petab_problem,\n",
" verbose=False,\n",
" force_compile=True,\n",
- " model_name=\"Blasi_CellSystems2016_1\"\n",
+ " model_name=\"Blasi_CellSystems2016_1\",\n",
")\n",
"\n",
"amici_solver = amici_model.getSolver()\n",
"amici_solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n",
"amici_solver.setSensitivityOrder(amici.SensitivityOrder.first)\n",
- "amici_model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.newtonOnly)\n",
+ "amici_model.setSteadyStateSensitivityMode(\n",
+ " amici.SteadyStateSensitivityMode.newtonOnly\n",
+ ")\n",
"\n",
"np.random.seed(2020)\n",
"problem_parameters = dict(\n",
@@ -650,17 +717,22 @@
" )\n",
")\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
" solver=amici_solver,\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
+ ")\n",
"\n",
"# hard to reproduce on GHA\n",
- "if os.getenv('GITHUB_ACTIONS') is None:\n",
- " assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_ERROR']"
+ "if os.getenv(\"GITHUB_ACTIONS\") is None:\n",
+ " assert [\n",
+ " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n",
+ " ] == [\"AMICI_ERROR\"]"
]
},
{
@@ -711,16 +783,21 @@
"outputs": [],
"source": [
"# use numerical integration\n",
- "amici_model.setSteadyStateSensitivityMode(amici.SteadyStateSensitivityMode.integrationOnly)\n",
+ "amici_model.setSteadyStateSensitivityMode(\n",
+ " amici.SteadyStateSensitivityMode.integrationOnly\n",
+ ")\n",
"\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
" solver=amici_solver,\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
+ ")\n",
"assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])"
]
},
@@ -736,7 +813,7 @@
"# this is enabled by the `AMICI_EXPERIMENTAL_SBML_NONCONST_CLS` environment variable\n",
"os.environ[\"AMICI_EXPERIMENTAL_SBML_NONCONST_CLS\"] = \"1\"\n",
"amici_model = import_petab_problem(\n",
- " petab_problem, \n",
+ " petab_problem,\n",
" verbose=False,\n",
" # we need a different model name if we import the model again\n",
" # we cannot load a model with the same name as an already loaded model\n",
@@ -750,13 +827,16 @@
"amici_solver.setSensitivityOrder(amici.SensitivityOrder.first)\n",
"\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
" solver=amici_solver,\n",
")\n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
+ ")\n",
"assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])"
]
},
@@ -779,7 +859,7 @@
"source": [
"petab_problem = benchmark_models_petab.get_problem(\"Brannmark_JBC2010\")\n",
"amici_model = import_petab_problem(\n",
- " petab_problem, \n",
+ " petab_problem,\n",
" verbose=False,\n",
")\n",
"\n",
@@ -794,18 +874,32 @@
" )\n",
")\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
" solver=amici_solver,\n",
")\n",
- " \n",
- "print(\"Status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
+ "\n",
+ "print(\n",
+ " \"Status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
+ ")\n",
"\n",
"# hard to reproduce on GHA\n",
- "if os.getenv('GITHUB_ACTIONS') is None:\n",
- " assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_ERROR', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN']"
+ "if os.getenv(\"GITHUB_ACTIONS\") is None:\n",
+ " assert [\n",
+ " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n",
+ " ] == [\n",
+ " \"AMICI_ERROR\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " ]"
]
},
{
@@ -858,23 +952,30 @@
"source": [
"# Reduce relative tolerance for integration\n",
"amici_solver = amici_model.getSolver()\n",
- "amici_solver.setRelativeTolerance(1/100 * amici_solver.getRelativeTolerance())\n",
+ "amici_solver.setRelativeTolerance(\n",
+ " 1 / 100 * amici_solver.getRelativeTolerance()\n",
+ ")\n",
"\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
" solver=amici_solver,\n",
")\n",
- "print(\"status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
+ "print(\n",
+ " \"status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
+ ")\n",
"\n",
"rdata = res[RDATAS][0]\n",
- "print(f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\")\n",
+ "print(\n",
+ " f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\"\n",
+ ")\n",
"print(f\"{rdata.preeq_numsteps=}\")\n",
"\n",
"# hard to reproduce on GHA\n",
- "if os.getenv('GITHUB_ACTIONS') is None:\n",
+ "if os.getenv(\"GITHUB_ACTIONS\") is None:\n",
" assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])"
]
},
@@ -889,25 +990,35 @@
"for log10_relaxation_factor in range(1, 10):\n",
" print(f\"Relaxing tolerances by factor {10 ** log10_relaxation_factor}\")\n",
" amici_solver = amici_model.getSolver()\n",
- " amici_solver.setRelativeToleranceSteadyState(amici_solver.getRelativeToleranceSteadyState() * 10 ** log10_relaxation_factor)\n",
- " \n",
+ " amici_solver.setRelativeToleranceSteadyState(\n",
+ " amici_solver.getRelativeToleranceSteadyState()\n",
+ " * 10**log10_relaxation_factor\n",
+ " )\n",
+ "\n",
" res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
" solver=amici_solver,\n",
" )\n",
" if all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS]):\n",
- " print(f\"-> Succeeded with relative steady state tolerance {amici_solver.getRelativeToleranceSteadyState()}\\n\")\n",
+ " print(\n",
+ " f\"-> Succeeded with relative steady state tolerance {amici_solver.getRelativeToleranceSteadyState()}\\n\"\n",
+ " )\n",
" break\n",
" else:\n",
" print(\"-> Failed.\\n\")\n",
"\n",
- "print(\"status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
+ "print(\n",
+ " \"status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
+ ")\n",
"\n",
"rdata = res[RDATAS][0]\n",
- "print(f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\")\n",
+ "print(\n",
+ " f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\"\n",
+ ")\n",
"print(f\"{rdata.preeq_numsteps=}\")\n",
"assert all(rdata.status == amici.AMICI_SUCCESS for rdata in res[RDATAS])"
]
@@ -928,26 +1039,42 @@
"outputs": [],
"source": [
"# Let's try increasing the number of Newton steps\n",
- "# (this is 0 by default, so the Newton solver wasn't used before, \n",
+ "# (this is 0 by default, so the Newton solver wasn't used before,\n",
"# as can be seen from the 0 in `rdata.preeq_numsteps[0]`)\n",
"amici_solver = amici_model.getSolver()\n",
"amici_solver.setNewtonMaxSteps(10**4)\n",
"\n",
"res = simulate_petab(\n",
- " petab_problem=petab_problem, \n",
+ " petab_problem=petab_problem,\n",
" amici_model=amici_model,\n",
" problem_parameters=problem_parameters,\n",
" scaled_parameters=True,\n",
" solver=amici_solver,\n",
")\n",
- "print(\"status:\", [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]])\n",
+ "print(\n",
+ " \"status:\",\n",
+ " [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]],\n",
+ ")\n",
"\n",
"rdata = res[RDATAS][0]\n",
- "print(f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\")\n",
+ "print(\n",
+ " f\"preeq_status={list(map(amici.SteadyStateStatus, rdata.preeq_status.flatten()))}\"\n",
+ ")\n",
"print(f\"{rdata.preeq_numsteps=}\")\n",
"# hard to reproduce on GHA\n",
- "if os.getenv('GITHUB_ACTIONS') is None:\n",
- " assert [amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]] == ['AMICI_ERROR', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN', 'AMICI_NOT_RUN']"
+ "if os.getenv(\"GITHUB_ACTIONS\") is None:\n",
+ " assert [\n",
+ " amici.simulation_status_to_str(rdata.status) for rdata in res[RDATAS]\n",
+ " ] == [\n",
+ " \"AMICI_ERROR\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " \"AMICI_NOT_RUN\",\n",
+ " ]"
]
},
{
diff --git a/python/examples/example_jax/ExampleJax.ipynb b/python/examples/example_jax/ExampleJax.ipynb
index 9d79674287..efda5b458e 100644
--- a/python/examples/example_jax/ExampleJax.ipynb
+++ b/python/examples/example_jax/ExampleJax.ipynb
@@ -46,8 +46,9 @@
"outputs": [],
"source": [
"import petab\n",
+ "\n",
"model_name = \"Boehm_JProteomeRes2014\"\n",
- "yaml_file = f'https://raw.githubusercontent.com/Benchmarking-Initiative/Benchmark-Models-PEtab/master/Benchmark-Models/{model_name}/{model_name}.yaml'\n",
+ "yaml_file = f\"https://raw.githubusercontent.com/Benchmarking-Initiative/Benchmark-Models-PEtab/master/Benchmark-Models/{model_name}/{model_name}.yaml\"\n",
"petab_problem = petab.Problem.from_yaml(yaml_file)"
]
},
@@ -262,7 +263,10 @@
"outputs": [],
"source": [
"from amici.petab_import import import_petab_problem\n",
- "amici_model = import_petab_problem(petab_problem, force_compile=True, verbose=False)"
+ "\n",
+ "amici_model = import_petab_problem(\n",
+ " petab_problem, force_compile=True, verbose=False\n",
+ ")"
]
},
{
@@ -292,13 +296,15 @@
"source": [
"from amici.petab_objective import simulate_petab\n",
"import amici\n",
+ "\n",
"amici_solver = amici_model.getSolver()\n",
"amici_solver.setSensitivityOrder(amici.SensitivityOrder.first)\n",
"\n",
+ "\n",
"def amici_hcb_base(parameters: jnp.array):\n",
" return simulate_petab(\n",
- " petab_problem, \n",
- " amici_model, \n",
+ " petab_problem,\n",
+ " amici_model,\n",
" problem_parameters=dict(zip(petab_problem.x_free_ids, parameters)),\n",
" solver=amici_solver,\n",
" )"
@@ -320,13 +326,14 @@
"outputs": [],
"source": [
"def amici_hcb_llh(parameters: jnp.array):\n",
- " return amici_hcb_base(parameters)['llh']\n",
+ " return amici_hcb_base(parameters)[\"llh\"]\n",
+ "\n",
"\n",
"def amici_hcb_sllh(parameters: jnp.array):\n",
- " sllh = amici_hcb_base(parameters)['sllh']\n",
- " return jnp.asarray(tuple(\n",
- " sllh[par_id] for par_id in petab_problem.x_free_ids\n",
- " ))"
+ " sllh = amici_hcb_base(parameters)[\"sllh\"]\n",
+ " return jnp.asarray(\n",
+ " tuple(sllh[par_id] for par_id in petab_problem.x_free_ids)\n",
+ " )"
]
},
{
@@ -348,6 +355,8 @@
"from jax import custom_jvp\n",
"\n",
"import numpy as np\n",
+ "\n",
+ "\n",
"@custom_jvp\n",
"def jax_objective(parameters: jnp.array):\n",
" return hcb.call(\n",
@@ -380,7 +389,9 @@
" sllh = hcb.call(\n",
" amici_hcb_sllh,\n",
" parameters,\n",
- " result_shape=jax.ShapeDtypeStruct((petab_problem.parameter_df.estimate.sum(),), np.float64),\n",
+ " result_shape=jax.ShapeDtypeStruct(\n",
+ " (petab_problem.parameter_df.estimate.sum(),), np.float64\n",
+ " ),\n",
" )\n",
" return llh, sllh.dot(x_dot)"
]
@@ -402,18 +413,25 @@
"source": [
"from jax import value_and_grad\n",
"\n",
- "parameter_scales = petab_problem.parameter_df.loc[petab_problem.x_free_ids, petab.PARAMETER_SCALE].values\n",
+ "parameter_scales = petab_problem.parameter_df.loc[\n",
+ " petab_problem.x_free_ids, petab.PARAMETER_SCALE\n",
+ "].values\n",
+ "\n",
"\n",
"@jax.jit\n",
"@value_and_grad\n",
"def jax_objective_with_parameter_transform(parameters: jnp.array):\n",
- " par_scaled = jnp.asarray(tuple(\n",
- " value if scale == petab.LIN\n",
- " else jnp.exp(value) if scale == petab.LOG\n",
- " else jnp.power(10, value)\n",
- " for value, scale in zip(parameters, parameter_scales)\n",
- " ))\n",
- " return jax_objective(par_scaled)\n"
+ " par_scaled = jnp.asarray(\n",
+ " tuple(\n",
+ " value\n",
+ " if scale == petab.LIN\n",
+ " else jnp.exp(value)\n",
+ " if scale == petab.LOG\n",
+ " else jnp.power(10, value)\n",
+ " for value, scale in zip(parameters, parameter_scales)\n",
+ " )\n",
+ " )\n",
+ " return jax_objective(par_scaled)"
]
},
{
@@ -445,7 +463,9 @@
"metadata": {},
"outputs": [],
"source": [
- "llh_jax, sllh_jax = jax_objective_with_parameter_transform(scaled_parameters_np)"
+ "llh_jax, sllh_jax = jax_objective_with_parameter_transform(\n",
+ " scaled_parameters_np\n",
+ ")"
]
},
{
@@ -464,12 +484,12 @@
"outputs": [],
"source": [
"r = simulate_petab(\n",
- " petab_problem, \n",
- " amici_model, \n",
+ " petab_problem,\n",
+ " amici_model,\n",
" solver=amici_solver,\n",
" scaled_parameters=True,\n",
" scaled_gradients=True,\n",
- " problem_parameters=scaled_parameters\n",
+ " problem_parameters=scaled_parameters,\n",
")"
]
},
@@ -528,7 +548,15 @@
],
"source": [
"import pandas as pd\n",
- "pd.DataFrame(dict(amici=r['llh'], jax=float(llh_jax), rel_diff=(r['llh']-float(llh_jax))/r['llh']), index=('llh',))"
+ "\n",
+ "pd.DataFrame(\n",
+ " dict(\n",
+ " amici=r[\"llh\"],\n",
+ " jax=float(llh_jax),\n",
+ " rel_diff=(r[\"llh\"] - float(llh_jax)) / r[\"llh\"],\n",
+ " ),\n",
+ " index=(\"llh\",),\n",
+ ")"
]
},
{
@@ -641,10 +669,13 @@
}
],
"source": [
- "grad_amici=np.asarray(list(r['sllh'].values()))\n",
- "grad_jax=np.asarray(sllh_jax)\n",
- "rel_diff=(grad_amici-grad_jax)/grad_jax\n",
- "pd.DataFrame(index=r['sllh'].keys(), data=dict(amici=grad_amici, jax=grad_jax, rel_diff=rel_diff))"
+ "grad_amici = np.asarray(list(r[\"sllh\"].values()))\n",
+ "grad_jax = np.asarray(sllh_jax)\n",
+ "rel_diff = (grad_amici - grad_jax) / grad_jax\n",
+ "pd.DataFrame(\n",
+ " index=r[\"sllh\"].keys(),\n",
+ " data=dict(amici=grad_amici, jax=grad_jax, rel_diff=rel_diff),\n",
+ ")"
]
},
{
@@ -664,7 +695,9 @@
"outputs": [],
"source": [
"jax.config.update(\"jax_enable_x64\", True)\n",
- "llh_jax, sllh_jax = jax_objective_with_parameter_transform(scaled_parameters_np)"
+ "llh_jax, sllh_jax = jax_objective_with_parameter_transform(\n",
+ " scaled_parameters_np\n",
+ ")"
]
},
{
@@ -729,7 +762,14 @@
}
],
"source": [
- "pd.DataFrame(dict(amici=r['llh'], jax=float(llh_jax), rel_diff=(r['llh']-float(llh_jax))/r['llh']), index=('llh',))"
+ "pd.DataFrame(\n",
+ " dict(\n",
+ " amici=r[\"llh\"],\n",
+ " jax=float(llh_jax),\n",
+ " rel_diff=(r[\"llh\"] - float(llh_jax)) / r[\"llh\"],\n",
+ " ),\n",
+ " index=(\"llh\",),\n",
+ ")"
]
},
{
@@ -842,10 +882,13 @@
}
],
"source": [
- "grad_amici=np.asarray(list(r['sllh'].values()))\n",
- "grad_jax=np.asarray(sllh_jax)\n",
- "rel_diff=(grad_amici-grad_jax)/grad_jax\n",
- "pd.DataFrame(index=r['sllh'].keys(), data=dict(amici=grad_amici, jax=grad_jax, rel_diff=rel_diff))"
+ "grad_amici = np.asarray(list(r[\"sllh\"].values()))\n",
+ "grad_jax = np.asarray(sllh_jax)\n",
+ "rel_diff = (grad_amici - grad_jax) / grad_jax\n",
+ "pd.DataFrame(\n",
+ " index=r[\"sllh\"].keys(),\n",
+ " data=dict(amici=grad_amici, jax=grad_jax, rel_diff=rel_diff),\n",
+ ")"
]
}
],
diff --git a/python/examples/example_large_models/example_performance_optimization.ipynb b/python/examples/example_large_models/example_performance_optimization.ipynb
index e3dd72b6cd..31a9fc1729 100644
--- a/python/examples/example_large_models/example_performance_optimization.ipynb
+++ b/python/examples/example_large_models/example_performance_optimization.ipynb
@@ -39,7 +39,7 @@
"import matplotlib.pyplot as plt\n",
"import pandas as pd\n",
"\n",
- "plt.rcParams.update({'font.size': 12})"
+ "plt.rcParams.update({\"font.size\": 12})"
]
},
{
@@ -122,7 +122,9 @@
"plt.bar([\"True\", \"False\"], [873.54, 697.85])\n",
"plt.xlabel(\"generate_sensitivity_code\")\n",
"plt.ylabel(\"Import time (s)\")\n",
- "plt.title(\"Import speed-up when not generating sensitivity code\\n(Froehlich_CellSystems2018)\");\n",
+ "plt.title(\n",
+ " \"Import speed-up when not generating sensitivity code\\n(Froehlich_CellSystems2018)\"\n",
+ ")\n",
"plt.show()\n",
"\n",
"print(f\"speedup: {873.54/697.85:.2f}x\")"
@@ -217,9 +219,11 @@
" plt.ylabel(\"Import time (s)\")\n",
" plt.ylim(ymin=0)\n",
" plt.show()\n",
- " \n",
+ "\n",
" import_times = df.sort_values(\"nprocs\")[\"time\"].values\n",
- " percent_change = (import_times[0] - min(import_times[1:])) / import_times[0] * 100\n",
+ " percent_change = (\n",
+ " (import_times[0] - min(import_times[1:])) / import_times[0] * 100\n",
+ " )\n",
" if percent_change > 0:\n",
" print(f\"Import time decreased by up to ~{percent_change:.0f}%.\")\n",
" else:\n",
@@ -281,14 +285,19 @@
"source": [
"figsize(8, 4)\n",
"compilation_time_s = [3022.453, 289.518]\n",
- "labels = [\"g++ (Ubuntu 12.2.0-3ubuntu1) 12.2.0\", \"Ubuntu clang version 15.0.2-1\"]\n",
+ "labels = [\n",
+ " \"g++ (Ubuntu 12.2.0-3ubuntu1) 12.2.0\",\n",
+ " \"Ubuntu clang version 15.0.2-1\",\n",
+ "]\n",
"plt.bar(labels, compilation_time_s)\n",
"plt.ylim(ymin=0)\n",
"plt.title(\"Choice of compiler - FröhlichGer2022\")\n",
"plt.xlabel(\"Compiler\")\n",
- "plt.ylabel(\"Walltime for compilation (s)\");\n",
+ "plt.ylabel(\"Walltime for compilation (s)\")\n",
"plt.show()\n",
- "print(f\"Clang was ~{compilation_time_s[0] / compilation_time_s[1]:.0f}x as fast as g++.\")"
+ "print(\n",
+ " f\"Clang was ~{compilation_time_s[0] / compilation_time_s[1]:.0f}x as fast as g++.\"\n",
+ ")"
]
},
{
@@ -360,10 +369,12 @@
" plt.ylabel(\"Compile time (s)\")\n",
" plt.ylim(ymin=0)\n",
" plt.show()\n",
- " \n",
+ "\n",
" compilation_time_s = df.sort_values(\"nprocs\")[\"time\"].values\n",
- " print(\"We were able to reduce compile time by up to \"\n",
- " f\"~{(compilation_time_s[0] - min(compilation_time_s[1:])) / compilation_time_s[0] * 100:.0f}%.\")"
+ " print(\n",
+ " \"We were able to reduce compile time by up to \"\n",
+ " f\"~{(compilation_time_s[0] - min(compilation_time_s[1:])) / compilation_time_s[0] * 100:.0f}%.\"\n",
+ " )"
]
},
{
diff --git a/python/examples/example_petab/petab.ipynb b/python/examples/example_petab/petab.ipynb
index 27ee96e449..689d793f56 100644
--- a/python/examples/example_petab/petab.ipynb
+++ b/python/examples/example_petab/petab.ipynb
@@ -39,10 +39,10 @@
"output_type": "stream",
"text": [
"Cloning into 'tmp/benchmark-models'...\n",
- "remote: Enumerating objects: 142, done.\u001B[K\n",
- "remote: Counting objects: 100% (142/142), done.\u001B[K\n",
- "remote: Compressing objects: 100% (122/122), done.\u001B[K\n",
- "remote: Total 142 (delta 41), reused 104 (delta 18), pack-reused 0\u001B[K\n",
+ "remote: Enumerating objects: 142, done.\u001b[K\n",
+ "remote: Counting objects: 100% (142/142), done.\u001b[K\n",
+ "remote: Compressing objects: 100% (122/122), done.\u001b[K\n",
+ "remote: Total 142 (delta 41), reused 104 (delta 18), pack-reused 0\u001b[K\n",
"Receiving objects: 100% (142/142), 648.29 KiB | 1.23 MiB/s, done.\n",
"Resolving deltas: 100% (41/41), done.\n"
]
@@ -335,10 +335,15 @@
],
"source": [
"parameters = {\n",
- " x_id: x_val for x_id, x_val in\n",
- " zip(petab_problem.x_ids, petab_problem.x_nominal_scaled)\n",
+ " x_id: x_val\n",
+ " for x_id, x_val in zip(petab_problem.x_ids, petab_problem.x_nominal_scaled)\n",
"}\n",
- "simulate_petab(petab_problem, amici_model, problem_parameters=parameters, scaled_parameters=True)"
+ "simulate_petab(\n",
+ " petab_problem,\n",
+ " amici_model,\n",
+ " problem_parameters=parameters,\n",
+ " scaled_parameters=True,\n",
+ ")"
]
},
{
diff --git a/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb b/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb
index 07da70c02f..63fbc7a4ff 100644
--- a/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb
+++ b/python/examples/example_presimulation/ExampleExperimentalConditions.ipynb
@@ -15,9 +15,9 @@
"outputs": [],
"source": [
"# SBML model we want to import\n",
- "sbml_file = 'model_presimulation.xml'\n",
+ "sbml_file = \"model_presimulation.xml\"\n",
"# Name of the model that will also be the name of the python module\n",
- "model_name = 'model_presimulation'\n",
+ "model_name = \"model_presimulation\"\n",
"# Directory to which the generated model code is written\n",
"model_output_dir = model_name\n",
"\n",
@@ -86,22 +86,45 @@
"sbml_doc = sbml_reader.readSBML(sbml_file)\n",
"sbml_model = sbml_doc.getModel()\n",
"\n",
- "print('Species:')\n",
- "pprint([(s.getId(),s.getName()) for s in sbml_model.getListOfSpecies()])\n",
+ "print(\"Species:\")\n",
+ "pprint([(s.getId(), s.getName()) for s in sbml_model.getListOfSpecies()])\n",
"\n",
- "print('\\nReactions:')\n",
+ "print(\"\\nReactions:\")\n",
"for reaction in sbml_model.getListOfReactions():\n",
- " reactants = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfReactants()])\n",
- " products = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfProducts()])\n",
- " reversible = '<' if reaction.getReversible() else ''\n",
- " print('%3s: %10s %1s->%10s\\t\\t[%s]' % (reaction.getName(), \n",
- " reactants,\n",
- " reversible,\n",
- " products,\n",
- " libsbml.formulaToL3String(reaction.getKineticLaw().getMath())))\n",
- " \n",
- "print('Parameters:')\n",
- "pprint([(p.getId(),p.getName()) for p in sbml_model.getListOfParameters()])"
+ " reactants = \" + \".join(\n",
+ " [\n",
+ " \"%s %s\"\n",
+ " % (\n",
+ " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n",
+ " r.getSpecies(),\n",
+ " )\n",
+ " for r in reaction.getListOfReactants()\n",
+ " ]\n",
+ " )\n",
+ " products = \" + \".join(\n",
+ " [\n",
+ " \"%s %s\"\n",
+ " % (\n",
+ " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n",
+ " r.getSpecies(),\n",
+ " )\n",
+ " for r in reaction.getListOfProducts()\n",
+ " ]\n",
+ " )\n",
+ " reversible = \"<\" if reaction.getReversible() else \"\"\n",
+ " print(\n",
+ " \"%3s: %10s %1s->%10s\\t\\t[%s]\"\n",
+ " % (\n",
+ " reaction.getName(),\n",
+ " reactants,\n",
+ " reversible,\n",
+ " products,\n",
+ " libsbml.formulaToL3String(reaction.getKineticLaw().getMath()),\n",
+ " )\n",
+ " )\n",
+ "\n",
+ "print(\"Parameters:\")\n",
+ "pprint([(p.getId(), p.getName()) for p in sbml_model.getListOfParameters()])"
]
},
{
@@ -152,7 +175,12 @@
],
"source": [
"from IPython.display import IFrame\n",
- "IFrame('https://amici.readthedocs.io/en/latest/glossary.html#term-fixed-parameters', width=600, height=175)"
+ "\n",
+ "IFrame(\n",
+ " \"https://amici.readthedocs.io/en/latest/glossary.html#term-fixed-parameters\",\n",
+ " width=600,\n",
+ " height=175,\n",
+ ")"
]
},
{
@@ -161,7 +189,7 @@
"metadata": {},
"outputs": [],
"source": [
- "fixedParameters = ['DRUG_0','KIN_0']"
+ "fixedParameters = [\"DRUG_0\", \"KIN_0\"]"
]
},
{
@@ -190,10 +218,10 @@
"source": [
"# Retrieve model output names and formulae from AssignmentRules and remove the respective rules\n",
"observables = amici.assignmentRules2observables(\n",
- " sbml_importer.sbml, # the libsbml model object\n",
- " filter_function=lambda variable: variable.getName() == 'pPROT' \n",
- " )\n",
- "print('Observables:')\n",
+ " sbml_importer.sbml, # the libsbml model object\n",
+ " filter_function=lambda variable: variable.getName() == \"pPROT\",\n",
+ ")\n",
+ "print(\"Observables:\")\n",
"pprint(observables)"
]
},
@@ -210,11 +238,13 @@
"metadata": {},
"outputs": [],
"source": [
- "sbml_importer.sbml2amici(model_name, \n",
- " model_output_dir, \n",
- " verbose=False,\n",
- " observables=observables,\n",
- " constant_parameters=fixedParameters)\n",
+ "sbml_importer.sbml2amici(\n",
+ " model_name,\n",
+ " model_output_dir,\n",
+ " verbose=False,\n",
+ " observables=observables,\n",
+ " constant_parameters=fixedParameters,\n",
+ ")\n",
"# load the generated module\n",
"model_module = amici.import_model_module(model_name, model_output_dir)"
]
@@ -266,7 +296,7 @@
],
"source": [
"# Run simulation using default model parameters and solver options\n",
- "model.setTimepoints(np.linspace(0, 60, 60)) \n",
+ "model.setTimepoints(np.linspace(0, 60, 60))\n",
"rdata = amici.runAmiciSimulation(model, solver)\n",
"amici.plotting.plotObservableTrajectories(rdata)"
]
@@ -298,7 +328,7 @@
],
"source": [
"edata = amici.ExpData(rdata, 0.1, 0.0)\n",
- "edata.fixedParameters = [0,2]\n",
+ "edata.fixedParameters = [0, 2]\n",
"rdata = amici.runAmiciSimulation(model, solver, edata)\n",
"amici.plotting.plotObservableTrajectories(rdata)"
]
@@ -330,7 +360,7 @@
}
],
"source": [
- "edata.fixedParametersPreequilibration = [3,0]\n",
+ "edata.fixedParametersPreequilibration = [3, 0]\n",
"rdata = amici.runAmiciSimulation(model, solver, edata)\n",
"amici.plotting.plotObservableTrajectories(rdata)"
]
diff --git a/python/examples/example_presimulation/createModelPresimulation.py b/python/examples/example_presimulation/createModelPresimulation.py
index 4806b67647..1db454a6b5 100644
--- a/python/examples/example_presimulation/createModelPresimulation.py
+++ b/python/examples/example_presimulation/createModelPresimulation.py
@@ -1,7 +1,15 @@
import os
import pysb.export
-from pysb.core import Expression, Initial, Model, Monomer, Observable, Parameter, Rule
+from pysb.core import (
+ Expression,
+ Initial,
+ Model,
+ Monomer,
+ Observable,
+ Parameter,
+ Rule,
+)
model = Model()
@@ -41,7 +49,8 @@
Rule(
"PROT_dephospho",
- prot(phospho="p", drug=None, kin=None) >> prot(phospho="u", drug=None, kin=None),
+ prot(phospho="p", drug=None, kin=None)
+ >> prot(phospho="u", drug=None, kin=None),
Parameter("kdephospho_prot", 0.1),
)
diff --git a/python/examples/example_splines/ExampleSplines.ipynb b/python/examples/example_splines/ExampleSplines.ipynb
index 593c84e3b9..d376ba91e5 100644
--- a/python/examples/example_splines/ExampleSplines.ipynb
+++ b/python/examples/example_splines/ExampleSplines.ipynb
@@ -40,36 +40,61 @@
"from matplotlib import pyplot as plt\n",
"\n",
"# Choose build directory\n",
- "BUILD_PATH = None # temporary folder\n",
+ "BUILD_PATH = None # temporary folder\n",
"# BUILD_PATH = 'build' # specified folder for debugging\n",
"if BUILD_PATH is not None:\n",
" # Remove previous models\n",
" rmtree(BUILD_PATH, ignore_errors=True)\n",
" os.mkdir(BUILD_PATH)\n",
- " \n",
+ "\n",
+ "\n",
"def simulate(sbml_model, parameters=None, *, model_name=None, **kwargs):\n",
" if model_name is None:\n",
- " model_name = 'model_' + uuid1().hex\n",
+ " model_name = \"model_\" + uuid1().hex\n",
" if BUILD_PATH is None:\n",
" with TemporaryDirectory() as build_dir:\n",
- " return _simulate(sbml_model, parameters, build_dir=build_dir, model_name=model_name, **kwargs)\n",
+ " return _simulate(\n",
+ " sbml_model,\n",
+ " parameters,\n",
+ " build_dir=build_dir,\n",
+ " model_name=model_name,\n",
+ " **kwargs\n",
+ " )\n",
" else:\n",
" build_dir = os.path.join(BUILD_PATH, model_name)\n",
" rmtree(build_dir, ignore_errors=True)\n",
- " return _simulate(sbml_model, parameters, build_dir=build_dir, model_name=model_name, **kwargs)\n",
+ " return _simulate(\n",
+ " sbml_model,\n",
+ " parameters,\n",
+ " build_dir=build_dir,\n",
+ " model_name=model_name,\n",
+ " **kwargs\n",
+ " )\n",
"\n",
- "def _simulate(sbml_model, parameters, *, build_dir, model_name, T=1, discard_annotations=False, plot=True):\n",
+ "\n",
+ "def _simulate(\n",
+ " sbml_model,\n",
+ " parameters,\n",
+ " *,\n",
+ " build_dir,\n",
+ " model_name,\n",
+ " T=1,\n",
+ " discard_annotations=False,\n",
+ " plot=True\n",
+ "):\n",
" if parameters is None:\n",
" parameters = {}\n",
" # Build the model module from the SBML file\n",
- " sbml_importer = amici.SbmlImporter(sbml_model, discard_annotations=discard_annotations)\n",
+ " sbml_importer = amici.SbmlImporter(\n",
+ " sbml_model, discard_annotations=discard_annotations\n",
+ " )\n",
" sbml_importer.sbml2amici(model_name, build_dir)\n",
" # Import the model module\n",
" sys.path.insert(0, os.path.abspath(build_dir))\n",
" model_module = import_module(model_name)\n",
" # Setup simulation timepoints and parameters\n",
" model = model_module.getModel()\n",
- " for (name, value) in parameters.items():\n",
+ " for name, value in parameters.items():\n",
" model.setParameterByName(name, value)\n",
" if isinstance(T, (int, float)):\n",
" T = np.linspace(0, T, 100)\n",
@@ -82,7 +107,7 @@
" # Plot results\n",
" if plot:\n",
" fig, ax = plt.subplots()\n",
- " ax.plot(rdata['t'], rdata['x'])\n",
+ " ax.plot(rdata[\"t\"], rdata[\"x\"])\n",
" ax.set_xlabel(\"time\")\n",
" ax.set_ylabel(\"concentration\")\n",
" return model, rdata"
@@ -161,7 +186,7 @@
}
],
"source": [
- "simulate('example_splines.xml', dict(f=1));"
+ "simulate(\"example_splines.xml\", dict(f=1));"
]
},
{
@@ -204,8 +229,8 @@
"source": [
"# Create a spline object\n",
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='f',\n",
- " evaluate_at=amici.sbml_utils.amici_time_symbol, # the spline function is evaluated at the current time point\n",
+ " sbml_id=\"f\",\n",
+ " evaluate_at=amici.sbml_utils.amici_time_symbol, # the spline function is evaluated at the current time point\n",
" nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),\n",
" values_at_nodes=[1, -1, 2],\n",
")"
@@ -256,7 +281,7 @@
],
"source": [
"# Plot the spline\n",
- "spline.plot(xlabel='time');"
+ "spline.plot(xlabel=\"time\");"
]
},
{
@@ -269,7 +294,8 @@
"source": [
"# Load SBML model using libsbml\n",
"import libsbml\n",
- "sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n",
+ "\n",
+ "sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n",
"sbml_model = sbml_doc.getModel()\n",
"# We can add the spline assignment rule to the SBML model\n",
"spline.add_to_sbml_model(sbml_model)"
@@ -307,7 +333,7 @@
"outputs": [],
"source": [
"# Final value should be equal to the integral computed above\n",
- "assert np.allclose(rdata['x'][-1], float(spline.integrate(0.0, 1.0)))"
+ "assert np.allclose(rdata[\"x\"][-1], float(spline.integrate(0.0, 1.0)))"
]
},
{
@@ -466,10 +492,10 @@
"outputs": [],
"source": [
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='f',\n",
+ " sbml_id=\"f\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),\n",
- " values_at_nodes=sp.symbols('f0:3'),\n",
+ " values_at_nodes=sp.symbols(\"f0:3\"),\n",
")"
]
},
@@ -481,7 +507,7 @@
},
"outputs": [],
"source": [
- "sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n",
+ "sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n",
"sbml_model = sbml_doc.getModel()\n",
"spline.add_to_sbml_model(\n",
" sbml_model,\n",
@@ -520,7 +546,7 @@
}
],
"source": [
- "spline.plot(parameters, xlabel='time');"
+ "spline.plot(parameters, xlabel=\"time\");"
]
},
{
@@ -566,9 +592,9 @@
"source": [
"# Sensitivities with respect to the spline values can be computed\n",
"fig, ax = plt.subplots()\n",
- "ax.plot(rdata['t'], rdata.sx[:, 0], label=model.getParameterNames()[0])\n",
- "ax.plot(rdata['t'], rdata.sx[:, 1], label=model.getParameterNames()[1])\n",
- "ax.plot(rdata['t'], rdata.sx[:, 2], label=model.getParameterNames()[2])\n",
+ "ax.plot(rdata[\"t\"], rdata.sx[:, 0], label=model.getParameterNames()[0])\n",
+ "ax.plot(rdata[\"t\"], rdata.sx[:, 1], label=model.getParameterNames()[1])\n",
+ "ax.plot(rdata[\"t\"], rdata.sx[:, 2], label=model.getParameterNames()[2])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"sensitivity\")\n",
"ax.legend();"
@@ -600,7 +626,7 @@
"source": [
"# A simple spline for which finite differencing would give a different result\n",
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='f',\n",
+ " sbml_id=\"f\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),\n",
" values_at_nodes=[1.0, -1.0, 1.0],\n",
@@ -627,7 +653,7 @@
}
],
"source": [
- "spline.plot(xlabel='time');"
+ "spline.plot(xlabel=\"time\");"
]
},
{
@@ -650,7 +676,7 @@
],
"source": [
"# Simulation\n",
- "sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n",
+ "sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n",
"sbml_model = sbml_doc.getModel()\n",
"spline.add_to_sbml_model(sbml_model)\n",
"simulate(sbml_model, T=1);"
@@ -698,11 +724,14 @@
"outputs": [],
"source": [
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='f',\n",
+ " sbml_id=\"f\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),\n",
" values_at_nodes=[-2, 1, -1],\n",
- " extrapolate=(None, 'constant'), # no extrapolation required on the left side\n",
+ " extrapolate=(\n",
+ " None,\n",
+ " \"constant\",\n",
+ " ), # no extrapolation required on the left side\n",
")"
]
},
@@ -725,7 +754,7 @@
}
],
"source": [
- "spline.plot(xlabel='time', xlim=(0, 1.5));"
+ "spline.plot(xlabel=\"time\", xlim=(0, 1.5));"
]
},
{
@@ -747,7 +776,7 @@
}
],
"source": [
- "sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n",
+ "sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n",
"sbml_model = sbml_doc.getModel()\n",
"spline.add_to_sbml_model(sbml_model)\n",
"simulate(sbml_model, T=1.5);"
@@ -790,11 +819,11 @@
"outputs": [],
"source": [
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='f',\n",
+ " sbml_id=\"f\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),\n",
- " values_at_nodes=[-2, 1, -2], # first and last node must coincide\n",
- " extrapolate='periodic',\n",
+ " values_at_nodes=[-2, 1, -2], # first and last node must coincide\n",
+ " extrapolate=\"periodic\",\n",
")"
]
},
@@ -817,7 +846,7 @@
}
],
"source": [
- "spline.plot(xlabel='time', xlim=(0, 3));"
+ "spline.plot(xlabel=\"time\", xlim=(0, 3));"
]
},
{
@@ -839,7 +868,7 @@
}
],
"source": [
- "sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n",
+ "sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n",
"sbml_model = sbml_doc.getModel()\n",
"spline.add_to_sbml_model(sbml_model)\n",
"simulate(sbml_model, T=3);"
@@ -882,11 +911,11 @@
"outputs": [],
"source": [
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='f',\n",
+ " sbml_id=\"f\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=4),\n",
" values_at_nodes=[-1, 2, 4, 2],\n",
- " bc='zeroderivative',\n",
+ " bc=\"zeroderivative\",\n",
")"
]
},
@@ -909,7 +938,7 @@
}
],
"source": [
- "spline.plot(xlabel='time');"
+ "spline.plot(xlabel=\"time\");"
]
},
{
@@ -941,11 +970,11 @@
"outputs": [],
"source": [
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='f',\n",
+ " sbml_id=\"f\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=4),\n",
" values_at_nodes=[-1, 2, 4, 2],\n",
- " bc='natural',\n",
+ " bc=\"natural\",\n",
")"
]
},
@@ -968,7 +997,7 @@
}
],
"source": [
- "spline.plot(xlabel='time');"
+ "spline.plot(xlabel=\"time\");"
]
},
{
@@ -1007,7 +1036,7 @@
"outputs": [],
"source": [
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='f',\n",
+ " sbml_id=\"f\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=5),\n",
" values_at_nodes=[2, 0.05, 0.1, 2, 1],\n",
@@ -1034,7 +1063,7 @@
],
"source": [
"# This spline assumes negative values!\n",
- "spline.plot(xlabel='time');"
+ "spline.plot(xlabel=\"time\");"
]
},
{
@@ -1046,7 +1075,7 @@
"outputs": [],
"source": [
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='f',\n",
+ " sbml_id=\"f\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=5),\n",
" values_at_nodes=[2, 0.05, 0.1, 2, 1],\n",
@@ -1075,7 +1104,7 @@
"source": [
"# Instead of under-shooting we now have over-shooting,\n",
"# but at least the \"spline\" is always positive\n",
- "spline.plot(xlabel='time');"
+ "spline.plot(xlabel=\"time\");"
]
},
{
@@ -1121,9 +1150,27 @@
},
"outputs": [],
"source": [
- "nruns = 6 # number of replicates\n",
- "num_nodes = [5, 10, 15, 20, 25, 30, 40] # benchmark model import for these node numbers\n",
- "amici_only_nodes = [50, 75, 100, 125, 150, 175, 200, 225, 250] # for these node numbers, only benchmark the annotation-based implementation"
+ "nruns = 6 # number of replicates\n",
+ "num_nodes = [\n",
+ " 5,\n",
+ " 10,\n",
+ " 15,\n",
+ " 20,\n",
+ " 25,\n",
+ " 30,\n",
+ " 40,\n",
+ "] # benchmark model import for these node numbers\n",
+ "amici_only_nodes = [\n",
+ " 50,\n",
+ " 75,\n",
+ " 100,\n",
+ " 125,\n",
+ " 150,\n",
+ " 175,\n",
+ " 200,\n",
+ " 225,\n",
+ " 250,\n",
+ "] # for these node numbers, only benchmark the annotation-based implementation"
]
},
{
@@ -1133,7 +1180,7 @@
"outputs": [],
"source": [
"# If running as a Github action, just do the minimal amount of work required to check whether the code is working\n",
- "if os.getenv('GITHUB_ACTIONS') is not None:\n",
+ "if os.getenv(\"GITHUB_ACTIONS\") is not None:\n",
" nruns = 1\n",
" num_nodes = [4]\n",
" amici_only_nodes = [5]"
@@ -1151,12 +1198,12 @@
"for n in num_nodes + amici_only_nodes:\n",
" # Create model\n",
" spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='f',\n",
+ " sbml_id=\"f\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=n),\n",
" values_at_nodes=np.random.rand(n),\n",
" )\n",
- " sbml_doc = libsbml.SBMLReader().readSBML('example_splines.xml')\n",
+ " sbml_doc = libsbml.SBMLReader().readSBML(\"example_splines.xml\")\n",
" sbml_model = sbml_doc.getModel()\n",
" spline.add_to_sbml_model(sbml_model)\n",
" # Benchmark model creation\n",
@@ -1165,22 +1212,34 @@
" for _ in range(nruns):\n",
" with tempfile.TemporaryDirectory() as tmpdir:\n",
" t0 = time.perf_counter_ns()\n",
- " amici.SbmlImporter(sbml_model).sbml2amici('benchmark', tmpdir)\n",
+ " amici.SbmlImporter(sbml_model).sbml2amici(\"benchmark\", tmpdir)\n",
" dt = time.perf_counter_ns() - t0\n",
" timings_amici.append(dt / 1e9)\n",
" if n in num_nodes:\n",
" with tempfile.TemporaryDirectory() as tmpdir:\n",
" t0 = time.perf_counter_ns()\n",
- " amici.SbmlImporter(sbml_model, discard_annotations=True).sbml2amici('benchmark', tmpdir)\n",
+ " amici.SbmlImporter(\n",
+ " sbml_model, discard_annotations=True\n",
+ " ).sbml2amici(\"benchmark\", tmpdir)\n",
" dt = time.perf_counter_ns() - t0\n",
" timings_piecewise.append(dt / 1e9)\n",
" # Append benchmark data to dataframe\n",
- " df_amici = pd.DataFrame(dict(num_nodes=n, time=timings_amici, use_annotations=True))\n",
- " df_piecewise = pd.DataFrame(dict(num_nodes=n, time=timings_piecewise, use_annotations=False))\n",
+ " df_amici = pd.DataFrame(\n",
+ " dict(num_nodes=n, time=timings_amici, use_annotations=True)\n",
+ " )\n",
+ " df_piecewise = pd.DataFrame(\n",
+ " dict(num_nodes=n, time=timings_piecewise, use_annotations=False)\n",
+ " )\n",
" if df is None:\n",
- " df = pd.concat([df_amici, df_piecewise], ignore_index=True, verify_integrity=True)\n",
+ " df = pd.concat(\n",
+ " [df_amici, df_piecewise], ignore_index=True, verify_integrity=True\n",
+ " )\n",
" else:\n",
- " df = pd.concat([df, df_amici, df_piecewise], ignore_index=True, verify_integrity=True)"
+ " df = pd.concat(\n",
+ " [df, df_amici, df_piecewise],\n",
+ " ignore_index=True,\n",
+ " verify_integrity=True,\n",
+ " )"
]
},
{
@@ -1203,19 +1262,62 @@
],
"source": [
"kwargs = dict(markersize=7.5)\n",
- "df_avg = df.groupby(['use_annotations', 'num_nodes']).mean().reset_index()\n",
+ "df_avg = df.groupby([\"use_annotations\", \"num_nodes\"]).mean().reset_index()\n",
"fig, ax = plt.subplots(1, 1, figsize=(6.5, 3.5))\n",
- "ax.plot(df_avg[np.logical_not(df_avg['use_annotations'])]['num_nodes'], df_avg[np.logical_not(df_avg['use_annotations'])]['time'], '.', label='MathML piecewise', **kwargs)\n",
- "ax.plot(df_avg[df_avg['use_annotations']]['num_nodes'], df_avg[df_avg['use_annotations']]['time'], '.', label='AMICI annotations', **kwargs)\n",
- "ax.set_ylabel('model import time (s)')\n",
- "ax.set_xlabel('number of spline nodes')\n",
- "ax.set_yscale('log')\n",
- "ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, pos: f\"{x:.0f}\"))\n",
- "ax.xaxis.set_ticks([10, 20, 30, 40, 60, 70, 80, 90, 110, 120, 130, 140, 160, 170, 180, 190, 210, 220, 230, 240, 260], minor=True)\n",
- "ax.yaxis.set_ticks([20, 30, 40, 50, 60, 70, 80, 90, 200, 300, 400], ['20', '30', '40', '50', None, None, None, None, '200', '300', '400'], minor=True)\n",
+ "ax.plot(\n",
+ " df_avg[np.logical_not(df_avg[\"use_annotations\"])][\"num_nodes\"],\n",
+ " df_avg[np.logical_not(df_avg[\"use_annotations\"])][\"time\"],\n",
+ " \".\",\n",
+ " label=\"MathML piecewise\",\n",
+ " **kwargs,\n",
+ ")\n",
+ "ax.plot(\n",
+ " df_avg[df_avg[\"use_annotations\"]][\"num_nodes\"],\n",
+ " df_avg[df_avg[\"use_annotations\"]][\"time\"],\n",
+ " \".\",\n",
+ " label=\"AMICI annotations\",\n",
+ " **kwargs,\n",
+ ")\n",
+ "ax.set_ylabel(\"model import time (s)\")\n",
+ "ax.set_xlabel(\"number of spline nodes\")\n",
+ "ax.set_yscale(\"log\")\n",
+ "ax.yaxis.set_major_formatter(\n",
+ " mpl.ticker.FuncFormatter(lambda x, pos: f\"{x:.0f}\")\n",
+ ")\n",
+ "ax.xaxis.set_ticks(\n",
+ " [\n",
+ " 10,\n",
+ " 20,\n",
+ " 30,\n",
+ " 40,\n",
+ " 60,\n",
+ " 70,\n",
+ " 80,\n",
+ " 90,\n",
+ " 110,\n",
+ " 120,\n",
+ " 130,\n",
+ " 140,\n",
+ " 160,\n",
+ " 170,\n",
+ " 180,\n",
+ " 190,\n",
+ " 210,\n",
+ " 220,\n",
+ " 230,\n",
+ " 240,\n",
+ " 260,\n",
+ " ],\n",
+ " minor=True,\n",
+ ")\n",
+ "ax.yaxis.set_ticks(\n",
+ " [20, 30, 40, 50, 60, 70, 80, 90, 200, 300, 400],\n",
+ " [\"20\", \"30\", \"40\", \"50\", None, None, None, None, \"200\", \"300\", \"400\"],\n",
+ " minor=True,\n",
+ ")\n",
"ax.legend()\n",
"ax.figure.tight_layout()\n",
- "#ax.figure.savefig('benchmark_import.pdf')"
+ "# ax.figure.savefig('benchmark_import.pdf')"
]
}
],
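For reference, the spline workflow exercised by the cells above reduces to a few calls. A minimal sketch, assuming the amici.splines API used throughout this notebook and an example_splines.xml file in the working directory:

    import amici.sbml_utils
    import amici.splines
    import libsbml

    # Cubic Hermite spline evaluated at model time, with three uniformly
    # spaced nodes on [0, 1] and fixed values at those nodes.
    spline = amici.splines.CubicHermiteSpline(
        sbml_id="f",
        evaluate_at=amici.sbml_utils.amici_time_symbol,
        nodes=amici.splines.UniformGrid(0, 1, number_of_nodes=3),
        values_at_nodes=[1, -1, 2],
    )

    # Attach the spline to an existing SBML model as an assignment rule,
    # then import as usual with amici.SbmlImporter.
    sbml_doc = libsbml.SBMLReader().readSBML("example_splines.xml")
    sbml_model = sbml_doc.getModel()
    spline.add_to_sbml_model(sbml_model)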
diff --git a/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb b/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb
index 8846974330..8e3ee6db10 100644
--- a/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb
+++ b/python/examples/example_splines_swameye/ExampleSplinesSwameye2003.ipynb
@@ -104,9 +104,11 @@
"outputs": [],
"source": [
"# If running as a Github action, just do the minimal amount of work required to check whether the code is working\n",
- "if os.getenv('GITHUB_ACTIONS') is not None:\n",
+ "if os.getenv(\"GITHUB_ACTIONS\") is not None:\n",
" n_starts = 15\n",
- " pypesto_optimizer = pypesto.optimize.FidesOptimizer(verbose=logging.WARNING, options=dict(maxiter=10))\n",
+ " pypesto_optimizer = pypesto.optimize.FidesOptimizer(\n",
+ " verbose=logging.WARNING, options=dict(maxiter=10)\n",
+ " )\n",
" pypesto_engine = pypesto.engine.SingleCoreEngine()"
]
},
@@ -175,14 +177,16 @@
"source": [
"# Create spline for pEpoR\n",
"nodes = [0, 5, 10, 20, 60]\n",
- "values_at_nodes = [sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes] # new parameter symbols for spline values\n",
+ "values_at_nodes = [\n",
+ " sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes\n",
+ "] # new parameter symbols for spline values\n",
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='pEpoR', # matches name of species in SBML model\n",
- " evaluate_at=amici.sbml_utils.amici_time_symbol, # the spline is evaluated at the current time\n",
+ " sbml_id=\"pEpoR\", # matches name of species in SBML model\n",
+ " evaluate_at=amici.sbml_utils.amici_time_symbol, # the spline is evaluated at the current time\n",
" nodes=nodes,\n",
- " values_at_nodes=values_at_nodes, # values at the nodes (in linear scale)\n",
- " extrapolate=(None, \"constant\"), # because steady state is reached\n",
- " bc=\"auto\", # automatically determined from extrapolate (bc at right end will be 'zero_derivative')\n",
+ " values_at_nodes=values_at_nodes, # values at the nodes (in linear scale)\n",
+ " extrapolate=(None, \"constant\"), # because steady state is reached\n",
+ " bc=\"auto\", # automatically determined from extrapolate (bc at right end will be 'zero_derivative')\n",
" logarithmic_parametrization=True,\n",
")"
]
@@ -209,9 +213,13 @@
"outputs": [],
"source": [
"# Add spline formula to SBML model\n",
- "sbml_doc = libsbml.SBMLReader().readSBML(os.path.join('Swameye_PNAS2003', 'swameye2003_model.xml'))\n",
+ "sbml_doc = libsbml.SBMLReader().readSBML(\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_model.xml\")\n",
+ ")\n",
"sbml_model = sbml_doc.getModel()\n",
- "spline.add_to_sbml_model(sbml_model, auto_add=True, y_nominal=0.1, y_constant=True)"
+ "spline.add_to_sbml_model(\n",
+ " sbml_model, auto_add=True, y_nominal=0.1, y_constant=True\n",
+ ")"
]
},
{
@@ -239,7 +247,13 @@
"source": [
"# Extra parameters associated to the spline\n",
"spline_parameters_df = pd.DataFrame(\n",
- " dict(parameterScale='log', lowerBound=0.001, upperBound=10, nominalValue=0.1, estimate=1),\n",
+ " dict(\n",
+ " parameterScale=\"log\",\n",
+ " lowerBound=0.001,\n",
+ " upperBound=10,\n",
+ " nominalValue=0.1,\n",
+ " estimate=1,\n",
+ " ),\n",
" index=pd.Series(list(map(str, values_at_nodes)), name=\"parameterId\"),\n",
")"
]
@@ -256,13 +270,22 @@
"# Create PEtab problem\n",
"petab_problem = petab.Problem(\n",
" sbml_model,\n",
- " condition_df=petab.conditions.get_condition_df(os.path.join('Swameye_PNAS2003', 'swameye2003_conditions.tsv')),\n",
- " measurement_df=petab.measurements.get_measurement_df(os.path.join('Swameye_PNAS2003', 'swameye2003_measurements.tsv')),\n",
+ " condition_df=petab.conditions.get_condition_df(\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_conditions.tsv\")\n",
+ " ),\n",
+ " measurement_df=petab.measurements.get_measurement_df(\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_measurements.tsv\")\n",
+ " ),\n",
" parameter_df=petab.core.concat_tables(\n",
- " [os.path.join('Swameye_PNAS2003', 'swameye2003_parameters.tsv'), spline_parameters_df],\n",
- " petab.parameters.get_parameter_df\n",
+ " [\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_parameters.tsv\"),\n",
+ " spline_parameters_df,\n",
+ " ],\n",
+ " petab.parameters.get_parameter_df,\n",
+ " ),\n",
+ " observable_df=petab.observables.get_observable_df(\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_observables.tsv\")\n",
" ),\n",
- " observable_df=petab.observables.get_observable_df(os.path.join('Swameye_PNAS2003', 'swameye2003_observables.tsv')),\n",
")"
]
},
@@ -351,7 +374,9 @@
"outputs": [],
"source": [
"# Import PEtab problem into pyPESTO\n",
- "pypesto_problem = pypesto.petab.PetabImporter(petab_problem, model_name=name).create_problem()\n",
+ "pypesto_problem = pypesto.petab.PetabImporter(\n",
+ " petab_problem, model_name=name\n",
+ ").create_problem()\n",
"\n",
"# Increase maximum number of steps for AMICI\n",
"pypesto_problem.objective.amici_solver.setMaxSteps(10**5)"
@@ -378,8 +403,10 @@
"outputs": [],
"source": [
"# Load existing results if available\n",
- "if os.path.exists(f'{name}.h5'):\n",
- " pypesto_result = pypesto.store.read_result(f'{name}.h5', problem=pypesto_problem)\n",
+ "if os.path.exists(f\"{name}.h5\"):\n",
+ " pypesto_result = pypesto.store.read_result(\n",
+ " f\"{name}.h5\", problem=pypesto_problem\n",
+ " )\n",
"else:\n",
" pypesto_result = None\n",
"# Overwrite\n",
@@ -401,7 +428,7 @@
" new_ids = [str(i) for i in range(n_starts)]\n",
" else:\n",
" last_id = max(int(i) for i in pypesto_result.optimize_result.id)\n",
- " new_ids = [str(i) for i in range(last_id+1, last_id+n_starts+1)]\n",
+ " new_ids = [str(i) for i in range(last_id + 1, last_id + n_starts + 1)]\n",
" pypesto_result = pypesto.optimize.minimize(\n",
" pypesto_problem,\n",
" n_starts=n_starts,\n",
@@ -412,7 +439,9 @@
" )\n",
" pypesto_result.optimize_result.sort()\n",
" if pypesto_result.optimize_result.x[0] is None:\n",
- " raise Exception(\"All multistarts failed (n_starts is probably too small)! If this error occurred during CI, just run the workflow again.\")"
+ " raise Exception(\n",
+ " \"All multistarts failed (n_starts is probably too small)! If this error occurred during CI, just run the workflow again.\"\n",
+ " )"
]
},
{
@@ -495,32 +524,52 @@
" if N is None:\n",
" objective = problem.objective\n",
" else:\n",
- " objective = problem.objective.set_custom_timepoints(timepoints_global=np.linspace(0, 60, N))\n",
+ " objective = problem.objective.set_custom_timepoints(\n",
+ " timepoints_global=np.linspace(0, 60, N)\n",
+ " )\n",
" if len(x) != len(problem.x_free_indices):\n",
" x = x[problem.x_free_indices]\n",
" simresult = objective(x, return_dict=True, **kwargs)\n",
- " return problem, simresult['rdatas'][0]\n",
+ " return problem, simresult[\"rdatas\"][0]\n",
+ "\n",
"\n",
"def simulate_pEpoR(x=None, **kwargs):\n",
" problem, rdata = _simulate(x, **kwargs)\n",
- " assert problem.objective.amici_model.getObservableIds()[0].startswith('pEpoR')\n",
- " return rdata['t'], rdata['y'][:, 0]\n",
+ " assert problem.objective.amici_model.getObservableIds()[0].startswith(\n",
+ " \"pEpoR\"\n",
+ " )\n",
+ " return rdata[\"t\"], rdata[\"y\"][:, 0]\n",
+ "\n",
"\n",
"def simulate_pSTAT5(x=None, **kwargs):\n",
" problem, rdata = _simulate(x, **kwargs)\n",
- " assert problem.objective.amici_model.getObservableIds()[1].startswith('pSTAT5')\n",
- " return rdata['t'], rdata['y'][:, 1]\n",
+ " assert problem.objective.amici_model.getObservableIds()[1].startswith(\n",
+ " \"pSTAT5\"\n",
+ " )\n",
+ " return rdata[\"t\"], rdata[\"y\"][:, 1]\n",
+ "\n",
"\n",
"def simulate_tSTAT5(x=None, **kwargs):\n",
" problem, rdata = _simulate(x, **kwargs)\n",
- " assert problem.objective.amici_model.getObservableIds()[-1].startswith('tSTAT5')\n",
- " return rdata['t'], rdata['y'][:, -1]\n",
+ " assert problem.objective.amici_model.getObservableIds()[-1].startswith(\n",
+ " \"tSTAT5\"\n",
+ " )\n",
+ " return rdata[\"t\"], rdata[\"y\"][:, -1]\n",
+ "\n",
"\n",
"# Experimental data\n",
- "df_measurements = petab.measurements.get_measurement_df(os.path.join('Swameye_PNAS2003', 'swameye2003_measurements.tsv'))\n",
- "df_pEpoR = df_measurements[df_measurements['observableId'].str.startswith('pEpoR')]\n",
- "df_pSTAT5 = df_measurements[df_measurements['observableId'].str.startswith('pSTAT5')]\n",
- "df_tSTAT5 = df_measurements[df_measurements['observableId'].str.startswith('tSTAT5')]"
+ "df_measurements = petab.measurements.get_measurement_df(\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_measurements.tsv\")\n",
+ ")\n",
+ "df_pEpoR = df_measurements[\n",
+ " df_measurements[\"observableId\"].str.startswith(\"pEpoR\")\n",
+ "]\n",
+ "df_pSTAT5 = df_measurements[\n",
+ " df_measurements[\"observableId\"].str.startswith(\"pSTAT5\")\n",
+ "]\n",
+ "df_tSTAT5 = df_measurements[\n",
+ " df_measurements[\"observableId\"].str.startswith(\"tSTAT5\")\n",
+ "]"
]
},
{
@@ -547,11 +596,34 @@
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
"t, pEpoR = simulate_pEpoR()\n",
"sigma_pEpoR = 0.0274 + 0.1 * pEpoR\n",
- "ax.fill_between(t, pEpoR - 2*sigma_pEpoR, pEpoR + 2*sigma_pEpoR, color='black', alpha=0.10, interpolate=True, label='2-sigma error bands')\n",
- "ax.plot(t, pEpoR, color='black', label='MLE')\n",
- "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.fill_between(\n",
+ " t,\n",
+ " pEpoR - 2 * sigma_pEpoR,\n",
+ " pEpoR + 2 * sigma_pEpoR,\n",
+ " color=\"black\",\n",
+ " alpha=0.10,\n",
+ " interpolate=True,\n",
+ " label=\"2-sigma error bands\",\n",
+ ")\n",
+ "ax.plot(t, pEpoR, color=\"black\", label=\"MLE\")\n",
+ "ax.plot(\n",
+ " df_pEpoR[\"time\"],\n",
+ " df_pEpoR[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"pEpoR\")\n",
@@ -581,10 +653,25 @@
"# Plot ML fit for pSTAT5\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
"t, pSTAT5 = simulate_pSTAT5()\n",
- "ax.plot(t, pSTAT5, color='black', label='MLE')\n",
- "ax.plot(df_pSTAT5['time'], df_pSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(t, pSTAT5, color=\"black\", label=\"MLE\")\n",
+ "ax.plot(\n",
+ " df_pSTAT5[\"time\"],\n",
+ " df_pSTAT5[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"pSTAT5\")\n",
@@ -614,10 +701,25 @@
"# Plot ML fit for tSTAT5\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
"t, tSTAT5 = simulate_tSTAT5()\n",
- "ax.plot(t, tSTAT5, color='black', label='MLE')\n",
- "ax.plot(df_tSTAT5['time'], df_tSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(t, tSTAT5, color=\"black\", label=\"MLE\")\n",
+ "ax.plot(\n",
+ " df_tSTAT5[\"time\"],\n",
+ " df_tSTAT5[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"tSTAT5\")\n",
@@ -634,7 +736,7 @@
"outputs": [],
"source": [
"# Store results for later\n",
- "all_results['5 nodes, FD'] = (pypesto_problem, pypesto_result)"
+ "all_results[\"5 nodes, FD\"] = (pypesto_problem, pypesto_result)"
]
},
{
@@ -680,9 +782,11 @@
"source": [
"# Create spline for pEpoR\n",
"nodes = [0, 2.5, 5.0, 7.5, 10.0, 12.5, 15.0, 17.5, 20, 25, 30, 35, 40, 50, 60]\n",
- "values_at_nodes = [sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes]\n",
+ "values_at_nodes = [\n",
+ " sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes\n",
+ "]\n",
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='pEpoR',\n",
+ " sbml_id=\"pEpoR\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=nodes,\n",
" values_at_nodes=values_at_nodes,\n",
@@ -726,17 +830,34 @@
"source": [
"# Add a parameter for regularization strength\n",
"reg_parameters_df = pd.DataFrame(\n",
- " dict(parameterScale='log10', lowerBound=1e-6, upperBound=1e6, nominalValue=1.0, estimate=0),\n",
- " index=pd.Series(['regularization_strength'], name=\"parameterId\"),\n",
+ " dict(\n",
+ " parameterScale=\"log10\",\n",
+ " lowerBound=1e-6,\n",
+ " upperBound=1e6,\n",
+ " nominalValue=1.0,\n",
+ " estimate=0,\n",
+ " ),\n",
+ " index=pd.Series([\"regularization_strength\"], name=\"parameterId\"),\n",
")\n",
"# Encode regularization term as an additional observable\n",
"reg_observables_df = pd.DataFrame(\n",
- " dict(observableFormula=f'sqrt({regularization})'.replace('**', '^'), observableTransformation='lin', noiseFormula='1/sqrt(regularization_strength)', noiseDistribution='normal'),\n",
- " index=pd.Series(['regularization'], name=\"observableId\"),\n",
+ " dict(\n",
+ " observableFormula=f\"sqrt({regularization})\".replace(\"**\", \"^\"),\n",
+ " observableTransformation=\"lin\",\n",
+ " noiseFormula=\"1/sqrt(regularization_strength)\",\n",
+ " noiseDistribution=\"normal\",\n",
+ " ),\n",
+ " index=pd.Series([\"regularization\"], name=\"observableId\"),\n",
")\n",
"# and correspoding measurement\n",
"reg_measurements_df = pd.DataFrame(\n",
- " dict(observableId='regularization', simulationConditionId='condition1', measurement=0, time=0, observableTransformation='lin'),\n",
+ " dict(\n",
+ " observableId=\"regularization\",\n",
+ " simulationConditionId=\"condition1\",\n",
+ " measurement=0,\n",
+ " time=0,\n",
+ " observableTransformation=\"lin\",\n",
+ " ),\n",
" index=pd.Series([0]),\n",
")"
]
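To see why this fixed noise model acts as a quadratic penalty: the extra data point has measurement 0, observable sqrt(regularization), and sigma = 1/sqrt(regularization_strength), so its squared weighted residual is ((0 - sqrt(reg)) / sigma)**2 = regularization_strength * reg. A quick numeric check of that algebra, with hypothetical values:

    # Hypothetical numbers, used only to check the algebra above.
    reg, lam = 0.25, 75.0   # regularization term and its strength
    sigma = 1.0 / lam**0.5  # noiseFormula = 1/sqrt(regularization_strength)
    residual_sq = ((0.0 - reg**0.5) / sigma) ** 2
    assert abs(residual_sq - lam * reg) < 1e-9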
@@ -751,9 +872,13 @@
"outputs": [],
"source": [
"# Add spline formula to SBML model\n",
- "sbml_doc = libsbml.SBMLReader().readSBML(os.path.join('Swameye_PNAS2003', 'swameye2003_model.xml'))\n",
+ "sbml_doc = libsbml.SBMLReader().readSBML(\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_model.xml\")\n",
+ ")\n",
"sbml_model = sbml_doc.getModel()\n",
- "spline.add_to_sbml_model(sbml_model, auto_add=True, y_nominal=0.1, y_constant=True)"
+ "spline.add_to_sbml_model(\n",
+ " sbml_model, auto_add=True, y_nominal=0.1, y_constant=True\n",
+ ")"
]
},
{
@@ -767,7 +892,13 @@
"source": [
"# Extra parameters associated to the spline\n",
"spline_parameters_df = pd.DataFrame(\n",
- " dict(parameterScale='log', lowerBound=0.001, upperBound=10, nominalValue=0.1, estimate=1),\n",
+ " dict(\n",
+ " parameterScale=\"log\",\n",
+ " lowerBound=0.001,\n",
+ " upperBound=10,\n",
+ " nominalValue=0.1,\n",
+ " estimate=1,\n",
+ " ),\n",
" index=pd.Series(list(map(str, values_at_nodes)), name=\"parameterId\"),\n",
")"
]
@@ -784,18 +915,30 @@
"# Create PEtab problem\n",
"petab_problem = petab.Problem(\n",
" sbml_model,\n",
- " condition_df=petab.conditions.get_condition_df(os.path.join('Swameye_PNAS2003', 'swameye2003_conditions.tsv')),\n",
+ " condition_df=petab.conditions.get_condition_df(\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_conditions.tsv\")\n",
+ " ),\n",
" measurement_df=petab.core.concat_tables(\n",
- " [os.path.join('Swameye_PNAS2003', 'swameye2003_measurements.tsv'), reg_measurements_df],\n",
- " petab.measurements.get_measurement_df\n",
+ " [\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_measurements.tsv\"),\n",
+ " reg_measurements_df,\n",
+ " ],\n",
+ " petab.measurements.get_measurement_df,\n",
" ).reset_index(drop=True),\n",
" parameter_df=petab.core.concat_tables(\n",
- " [os.path.join('Swameye_PNAS2003', 'swameye2003_parameters.tsv'), spline_parameters_df, reg_parameters_df],\n",
- " petab.parameters.get_parameter_df\n",
+ " [\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_parameters.tsv\"),\n",
+ " spline_parameters_df,\n",
+ " reg_parameters_df,\n",
+ " ],\n",
+ " petab.parameters.get_parameter_df,\n",
" ),\n",
" observable_df=petab.core.concat_tables(\n",
- " [os.path.join('Swameye_PNAS2003', 'swameye2003_observables.tsv'), reg_observables_df],\n",
- " petab.observables.get_observable_df\n",
+ " [\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_observables.tsv\"),\n",
+ " reg_observables_df,\n",
+ " ],\n",
+ " petab.observables.get_observable_df,\n",
" ),\n",
")"
]
@@ -875,7 +1018,9 @@
"outputs": [],
"source": [
"# Import PEtab problem into pyPESTO\n",
- "pypesto_problem = pypesto.petab.PetabImporter(petab_problem, model_name=name).create_problem()"
+ "pypesto_problem = pypesto.petab.PetabImporter(\n",
+ " petab_problem, model_name=name\n",
+ ").create_problem()"
]
},
{
@@ -898,7 +1043,7 @@
"source": [
"# Try different regularization strengths\n",
"regstrengths = np.asarray([1, 10, 40, 75, 150, 500])\n",
- "if os.getenv('GITHUB_ACTIONS') is not None:\n",
+ "if os.getenv(\"GITHUB_ACTIONS\") is not None:\n",
" regstrengths = np.asarray([75])\n",
"regproblems = {}\n",
"regresults = {}\n",
@@ -907,14 +1052,16 @@
" # Fix parameter in pypesto problem\n",
" name = f\"Swameye_PNAS2003_15nodes_FD_reg{regstrength}\"\n",
" pypesto_problem.fix_parameters(\n",
- " pypesto_problem.x_names.index('regularization_strength'),\n",
- " np.log10(regstrength) # parameter is specified as log10 scale in PEtab\n",
+ " pypesto_problem.x_names.index(\"regularization_strength\"),\n",
+ " np.log10(\n",
+ " regstrength\n",
+ " ), # parameter is specified as log10 scale in PEtab\n",
" )\n",
" regproblem = copy.deepcopy(pypesto_problem)\n",
"\n",
" # Load existing results if available\n",
- " if os.path.exists(f'{name}.h5'):\n",
- " regresult = pypesto.store.read_result(f'{name}.h5', problem=regproblem)\n",
+ " if os.path.exists(f\"{name}.h5\"):\n",
+ " regresult = pypesto.store.read_result(f\"{name}.h5\", problem=regproblem)\n",
" else:\n",
" regresult = None\n",
" # Overwrite\n",
@@ -926,7 +1073,9 @@
" new_ids = [str(i) for i in range(n_starts)]\n",
" else:\n",
" last_id = max(int(i) for i in regresult.optimize_result.id)\n",
- " new_ids = [str(i) for i in range(last_id+1, last_id+n_starts+1)]\n",
+ " new_ids = [\n",
+ " str(i) for i in range(last_id + 1, last_id + n_starts + 1)\n",
+ " ]\n",
" regresult = pypesto.optimize.minimize(\n",
" regproblem,\n",
" n_starts=n_starts,\n",
@@ -937,7 +1086,9 @@
" )\n",
" regresult.optimize_result.sort()\n",
" if regresult.optimize_result.x[0] is None:\n",
- " raise Exception(\"All multistarts failed (n_starts is probably too small)! If this error occurred during CI, just run the workflow again.\")\n",
+ " raise Exception(\n",
+ " \"All multistarts failed (n_starts is probably too small)! If this error occurred during CI, just run the workflow again.\"\n",
+ " )\n",
"\n",
" # Save results to disk\n",
" # pypesto.store.write_result(regresult, f'{name}.h5', overwrite=True)\n",
@@ -975,15 +1126,21 @@
"regstrengths = sorted(regproblems.keys())\n",
"stats = []\n",
"for regstrength in regstrengths:\n",
- " t, pEpoR = simulate_pEpoR(N=None, problem=regproblems[regstrength], result=regresults[regstrength])\n",
- " assert np.array_equal(df_pEpoR['time'], t[:-1])\n",
+ " t, pEpoR = simulate_pEpoR(\n",
+ " N=None,\n",
+ " problem=regproblems[regstrength],\n",
+ " result=regresults[regstrength],\n",
+ " )\n",
+ " assert np.array_equal(df_pEpoR[\"time\"], t[:-1])\n",
" pEpoR = pEpoR[:-1]\n",
" sigma_pEpoR = 0.0274 + 0.1 * pEpoR\n",
- " stat = np.sum(((pEpoR - df_pEpoR['measurement']) / sigma_pEpoR)**2)\n",
+ " stat = np.sum(((pEpoR - df_pEpoR[\"measurement\"]) / sigma_pEpoR) ** 2)\n",
" print(f\"Regularization strength: {regstrength}. Statistic is {stat}\")\n",
" stats.append(stat)\n",
"# Select best regularization strength\n",
- "chosen_regstrength = regstrengths[np.abs(np.asarray(stats) - len(df_pEpoR['time'])).argmin()]"
+ "chosen_regstrength = regstrengths[\n",
+ " np.abs(np.asarray(stats) - len(df_pEpoR[\"time\"])).argmin()\n",
+ "]"
]
},
{
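The selection rule above is a goodness-of-fit heuristic: under a correct noise model, the weighted residual sum behaves roughly like a chi-square statistic with about as many degrees of freedom as there are data points, so the regularization strength whose statistic lands closest to the data count is preferred. A standalone sketch of the same argmin rule, with hypothetical statistics:

    import numpy as np

    # Hypothetical chi-square-like statistics, one per candidate strength.
    regstrengths = np.asarray([1, 10, 40, 75, 150, 500])
    stats = np.asarray([260.0, 120.0, 55.0, 47.0, 80.0, 310.0])
    n_data = 46  # hypothetical number of pEpoR measurements

    # Pick the strength whose statistic is closest to the data count.
    chosen = regstrengths[np.abs(stats - n_data).argmin()]
    assert chosen == 75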
@@ -1007,8 +1164,12 @@
],
"source": [
"# Visualize the results of the multistarts for a chosen regularization strength\n",
- "ax = pypesto.visualize.waterfall(regresults[chosen_regstrength], size=[6.5, 3.5])\n",
- "ax.set_title(f\"Waterfall plot (regularization strength = {chosen_regstrength})\")\n",
+ "ax = pypesto.visualize.waterfall(\n",
+ " regresults[chosen_regstrength], size=[6.5, 3.5]\n",
+ ")\n",
+ "ax.set_title(\n",
+ " f\"Waterfall plot (regularization strength = {chosen_regstrength})\"\n",
+ ")\n",
"ax.set_ylim(ax.get_ylim()[0], 100);"
]
},
@@ -1035,15 +1196,36 @@
"# Plot ML fit for pEpoR (all regularization strengths)\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
"for regstrength in sorted(regproblems.keys()):\n",
- " t, pEpoR = simulate_pEpoR(problem=regproblems[regstrength], result=regresults[regstrength])\n",
+ " t, pEpoR = simulate_pEpoR(\n",
+ " problem=regproblems[regstrength], result=regresults[regstrength]\n",
+ " )\n",
" if regstrength == chosen_regstrength:\n",
- " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n",
+ " kwargs = dict(\n",
+ " color=\"black\",\n",
+ " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n",
+ " zorder=2,\n",
+ " )\n",
" else:\n",
- " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n",
+ " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n",
" ax.plot(t, pEpoR, **kwargs)\n",
- "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(\n",
+ " df_pEpoR[\"time\"],\n",
+ " df_pEpoR[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"pEpoR\")\n",
@@ -1079,19 +1261,40 @@
"# Plot ML fit for pSTAT5 (all regularization strengths)\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
"for regstrength in sorted(regproblems.keys()):\n",
- " t, pSTAT5 = simulate_pSTAT5(problem=regproblems[regstrength], result=regresults[regstrength])\n",
+ " t, pSTAT5 = simulate_pSTAT5(\n",
+ " problem=regproblems[regstrength], result=regresults[regstrength]\n",
+ " )\n",
" if regstrength == chosen_regstrength:\n",
- " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n",
+ " kwargs = dict(\n",
+ " color=\"black\",\n",
+ " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n",
+ " zorder=2,\n",
+ " )\n",
" else:\n",
- " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n",
+ " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n",
" ax.plot(t, pSTAT5, **kwargs)\n",
- "ax.plot(df_pSTAT5['time'], df_pSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(\n",
+ " df_pSTAT5[\"time\"],\n",
+ " df_pSTAT5[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"pSTAT5\");\n",
- "#ax.legend();"
+ "# ax.legend();"
]
},
{
@@ -1117,19 +1320,40 @@
"# Plot ML fit for tSTAT5 (all regularization strengths)\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
"for regstrength in sorted(regproblems.keys()):\n",
- " t, tSTAT5 = simulate_tSTAT5(problem=regproblems[regstrength], result=regresults[regstrength])\n",
+ " t, tSTAT5 = simulate_tSTAT5(\n",
+ " problem=regproblems[regstrength], result=regresults[regstrength]\n",
+ " )\n",
" if regstrength == chosen_regstrength:\n",
- " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n",
+ " kwargs = dict(\n",
+ " color=\"black\",\n",
+ " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n",
+ " zorder=2,\n",
+ " )\n",
" else:\n",
- " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n",
+ " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n",
" ax.plot(t, tSTAT5, **kwargs)\n",
- "ax.plot(df_tSTAT5['time'], df_tSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(\n",
+ " df_tSTAT5[\"time\"],\n",
+ " df_tSTAT5[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"tSTAT5\");\n",
- "#ax.legend();"
+ "# ax.legend();"
]
},
{
@@ -1154,13 +1378,39 @@
"source": [
"# Plot ML fit for pEpoR (single regularization strength with noise model)\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
- "t, pEpoR = simulate_pEpoR(problem=regproblems[chosen_regstrength], result=regresults[chosen_regstrength])\n",
+ "t, pEpoR = simulate_pEpoR(\n",
+ " problem=regproblems[chosen_regstrength],\n",
+ " result=regresults[chosen_regstrength],\n",
+ ")\n",
"sigma_pEpoR = 0.0274 + 0.1 * pEpoR\n",
- "ax.fill_between(t, pEpoR - 2*sigma_pEpoR, pEpoR + 2*sigma_pEpoR, color='black', alpha=0.10, interpolate=True, label='2-sigma error bands')\n",
- "ax.plot(t, pEpoR, color='black', label='MLE')\n",
- "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.fill_between(\n",
+ " t,\n",
+ " pEpoR - 2 * sigma_pEpoR,\n",
+ " pEpoR + 2 * sigma_pEpoR,\n",
+ " color=\"black\",\n",
+ " alpha=0.10,\n",
+ " interpolate=True,\n",
+ " label=\"2-sigma error bands\",\n",
+ ")\n",
+ "ax.plot(t, pEpoR, color=\"black\", label=\"MLE\")\n",
+ "ax.plot(\n",
+ " df_pEpoR[\"time\"],\n",
+ " df_pEpoR[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"pEpoR\")\n",
@@ -1178,7 +1428,10 @@
"outputs": [],
"source": [
"# Store results for later\n",
- "all_results['15 nodes, FD'] = (regproblems[chosen_regstrength], regresults[chosen_regstrength])"
+ "all_results[\"15 nodes, FD\"] = (\n",
+ " regproblems[chosen_regstrength],\n",
+ " regresults[chosen_regstrength],\n",
+ ")"
]
},
{
@@ -1232,14 +1485,20 @@
"source": [
"# Create spline for pEpoR\n",
"nodes = [0, 5, 10, 20, 60]\n",
- "values_at_nodes = [sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes]\n",
- "derivatives_at_nodes = [sp.Symbol(f\"derivative_pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes[:-1]]\n",
+ "values_at_nodes = [\n",
+ " sp.Symbol(f\"pEpoR_t{str(t).replace('.', '_dot_')}\") for t in nodes\n",
+ "]\n",
+ "derivatives_at_nodes = [\n",
+ " sp.Symbol(f\"derivative_pEpoR_t{str(t).replace('.', '_dot_')}\")\n",
+ " for t in nodes[:-1]\n",
+ "]\n",
"spline = amici.splines.CubicHermiteSpline(\n",
- " sbml_id='pEpoR',\n",
+ " sbml_id=\"pEpoR\",\n",
" evaluate_at=amici.sbml_utils.amici_time_symbol,\n",
" nodes=nodes,\n",
" values_at_nodes=values_at_nodes,\n",
- " derivatives_at_nodes=derivatives_at_nodes + [0], # last value is zero because steady state is reached\n",
+ " derivatives_at_nodes=derivatives_at_nodes\n",
+ " + [0], # last value is zero because steady state is reached\n",
" extrapolate=(None, \"constant\"),\n",
" bc=\"auto\",\n",
" logarithmic_parametrization=True,\n",
@@ -1270,17 +1529,34 @@
"source": [
"# Add a parameter for regularization strength\n",
"reg_parameters_df = pd.DataFrame(\n",
- " dict(parameterScale='log10', lowerBound=1e-6, upperBound=1e6, nominalValue=1.0, estimate=0),\n",
- " index=pd.Series(['regularization_strength'], name=\"parameterId\"),\n",
+ " dict(\n",
+ " parameterScale=\"log10\",\n",
+ " lowerBound=1e-6,\n",
+ " upperBound=1e6,\n",
+ " nominalValue=1.0,\n",
+ " estimate=0,\n",
+ " ),\n",
+ " index=pd.Series([\"regularization_strength\"], name=\"parameterId\"),\n",
")\n",
"# Encode regularization term as an additional observable\n",
"reg_observables_df = pd.DataFrame(\n",
- " dict(observableFormula=f'sqrt({regularization})'.replace('**', '^'), observableTransformation='lin', noiseFormula='1/sqrt(regularization_strength)', noiseDistribution='normal'),\n",
- " index=pd.Series(['regularization'], name=\"observableId\"),\n",
+ " dict(\n",
+ " observableFormula=f\"sqrt({regularization})\".replace(\"**\", \"^\"),\n",
+ " observableTransformation=\"lin\",\n",
+ " noiseFormula=\"1/sqrt(regularization_strength)\",\n",
+ " noiseDistribution=\"normal\",\n",
+ " ),\n",
+ " index=pd.Series([\"regularization\"], name=\"observableId\"),\n",
")\n",
"# and correspoding measurement\n",
"reg_measurements_df = pd.DataFrame(\n",
- " dict(observableId='regularization', simulationConditionId='condition1', measurement=0, time=0, observableTransformation='lin'),\n",
+ " dict(\n",
+ " observableId=\"regularization\",\n",
+ " simulationConditionId=\"condition1\",\n",
+ " measurement=0,\n",
+ " time=0,\n",
+ " observableTransformation=\"lin\",\n",
+ " ),\n",
" index=pd.Series([0]),\n",
")"
]
@@ -1295,9 +1571,13 @@
"outputs": [],
"source": [
"# Add spline formula to SBML model\n",
- "sbml_doc = libsbml.SBMLReader().readSBML(os.path.join('Swameye_PNAS2003', 'swameye2003_model.xml'))\n",
+ "sbml_doc = libsbml.SBMLReader().readSBML(\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_model.xml\")\n",
+ ")\n",
"sbml_model = sbml_doc.getModel()\n",
- "spline.add_to_sbml_model(sbml_model, auto_add=True, y_nominal=0.1, y_constant=True)"
+ "spline.add_to_sbml_model(\n",
+ " sbml_model, auto_add=True, y_nominal=0.1, y_constant=True\n",
+ ")"
]
},
{
@@ -1325,11 +1605,23 @@
"source": [
"# Extra parameters associated to the spline\n",
"spline_parameters_df1 = pd.DataFrame(\n",
- " dict(parameterScale='log', lowerBound=0.001, upperBound=10, nominalValue=0.1, estimate=1),\n",
+ " dict(\n",
+ " parameterScale=\"log\",\n",
+ " lowerBound=0.001,\n",
+ " upperBound=10,\n",
+ " nominalValue=0.1,\n",
+ " estimate=1,\n",
+ " ),\n",
" index=pd.Series(list(map(str, values_at_nodes)), name=\"parameterId\"),\n",
")\n",
"spline_parameters_df2 = pd.DataFrame(\n",
- " dict(parameterScale='lin', lowerBound=-0.666, upperBound=0.666, nominalValue=0.0, estimate=1),\n",
+ " dict(\n",
+ " parameterScale=\"lin\",\n",
+ " lowerBound=-0.666,\n",
+ " upperBound=0.666,\n",
+ " nominalValue=0.0,\n",
+ " estimate=1,\n",
+ " ),\n",
" index=pd.Series(list(map(str, derivatives_at_nodes)), name=\"parameterId\"),\n",
")"
]
@@ -1346,18 +1638,31 @@
"# Create PEtab problem\n",
"petab_problem = petab.Problem(\n",
" sbml_model,\n",
- " condition_df=petab.conditions.get_condition_df(os.path.join('Swameye_PNAS2003', 'swameye2003_conditions.tsv')),\n",
+ " condition_df=petab.conditions.get_condition_df(\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_conditions.tsv\")\n",
+ " ),\n",
" measurement_df=petab.core.concat_tables(\n",
- " [os.path.join('Swameye_PNAS2003', 'swameye2003_measurements.tsv'), reg_measurements_df],\n",
- " petab.measurements.get_measurement_df\n",
+ " [\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_measurements.tsv\"),\n",
+ " reg_measurements_df,\n",
+ " ],\n",
+ " petab.measurements.get_measurement_df,\n",
" ).reset_index(drop=True),\n",
" parameter_df=petab.core.concat_tables(\n",
- " [os.path.join('Swameye_PNAS2003', 'swameye2003_parameters.tsv'), spline_parameters_df1, spline_parameters_df2, reg_parameters_df],\n",
- " petab.parameters.get_parameter_df\n",
+ " [\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_parameters.tsv\"),\n",
+ " spline_parameters_df1,\n",
+ " spline_parameters_df2,\n",
+ " reg_parameters_df,\n",
+ " ],\n",
+ " petab.parameters.get_parameter_df,\n",
" ),\n",
" observable_df=petab.core.concat_tables(\n",
- " [os.path.join('Swameye_PNAS2003', 'swameye2003_observables.tsv'), reg_observables_df],\n",
- " petab.observables.get_observable_df\n",
+ " [\n",
+ " os.path.join(\"Swameye_PNAS2003\", \"swameye2003_observables.tsv\"),\n",
+ " reg_observables_df,\n",
+ " ],\n",
+ " petab.observables.get_observable_df,\n",
" ),\n",
")"
]
@@ -1437,7 +1742,9 @@
"outputs": [],
"source": [
"# Import PEtab problem into pyPESTO\n",
- "pypesto_problem = pypesto.petab.PetabImporter(petab_problem, model_name=name).create_problem()"
+ "pypesto_problem = pypesto.petab.PetabImporter(\n",
+ " petab_problem, model_name=name\n",
+ ").create_problem()"
]
},
{
@@ -1459,7 +1766,7 @@
"source": [
"# Try different regularization strengths\n",
"regstrengths = np.asarray([1, 175, 500, 1000])\n",
- "if os.getenv('GITHUB_ACTIONS') is not None:\n",
+ "if os.getenv(\"GITHUB_ACTIONS\") is not None:\n",
" regstrengths = np.asarray([175])\n",
"regproblems = {}\n",
"regresults = {}\n",
@@ -1468,14 +1775,16 @@
" # Fix parameter in pypesto problem\n",
" name = f\"Swameye_PNAS2003_5nodes_reg{regstrength}\"\n",
" pypesto_problem.fix_parameters(\n",
- " pypesto_problem.x_names.index('regularization_strength'),\n",
- " np.log10(regstrength) # parameter is specified as log10 scale in PEtab\n",
+ " pypesto_problem.x_names.index(\"regularization_strength\"),\n",
+ " np.log10(\n",
+ " regstrength\n",
+ " ), # parameter is specified as log10 scale in PEtab\n",
" )\n",
" regproblem = copy.deepcopy(pypesto_problem)\n",
"\n",
" # Load existing results if available\n",
- " if os.path.exists(f'{name}.h5'):\n",
- " regresult = pypesto.store.read_result(f'{name}.h5', problem=regproblem)\n",
+ " if os.path.exists(f\"{name}.h5\"):\n",
+ " regresult = pypesto.store.read_result(f\"{name}.h5\", problem=regproblem)\n",
" else:\n",
" regresult = None\n",
" # Overwrite\n",
@@ -1487,7 +1796,9 @@
" new_ids = [str(i) for i in range(n_starts)]\n",
" else:\n",
" last_id = max(int(i) for i in regresult.optimize_result.id)\n",
- " new_ids = [str(i) for i in range(last_id+1, last_id+n_starts+1)]\n",
+ " new_ids = [\n",
+ " str(i) for i in range(last_id + 1, last_id + n_starts + 1)\n",
+ " ]\n",
" regresult = pypesto.optimize.minimize(\n",
" regproblem,\n",
" n_starts=n_starts,\n",
@@ -1498,7 +1809,9 @@
" )\n",
" regresult.optimize_result.sort()\n",
" if regresult.optimize_result.x[0] is None:\n",
- " raise Exception(\"All multistarts failed (n_starts is probably too small)! If this error occurred during CI, just run the workflow again.\")\n",
+ " raise Exception(\n",
+ " \"All multistarts failed (n_starts is probably too small)! If this error occurred during CI, just run the workflow again.\"\n",
+ " )\n",
"\n",
" # Save results to disk\n",
" # pypesto.store.write_result(regresult, f'{name}.h5', overwrite=True)\n",
@@ -1534,15 +1847,21 @@
"regstrengths = sorted(regproblems.keys())\n",
"stats = []\n",
"for regstrength in regstrengths:\n",
- " t, pEpoR = simulate_pEpoR(N=None, problem=regproblems[regstrength], result=regresults[regstrength])\n",
- " assert np.array_equal(df_pEpoR['time'], t[:-1])\n",
+ " t, pEpoR = simulate_pEpoR(\n",
+ " N=None,\n",
+ " problem=regproblems[regstrength],\n",
+ " result=regresults[regstrength],\n",
+ " )\n",
+ " assert np.array_equal(df_pEpoR[\"time\"], t[:-1])\n",
" pEpoR = pEpoR[:-1]\n",
" sigma_pEpoR = 0.0274 + 0.1 * pEpoR\n",
- " stat = np.sum(((pEpoR - df_pEpoR['measurement']) / sigma_pEpoR)**2)\n",
+ " stat = np.sum(((pEpoR - df_pEpoR[\"measurement\"]) / sigma_pEpoR) ** 2)\n",
" print(f\"Regularization strength: {regstrength}. Statistic is {stat}\")\n",
" stats.append(stat)\n",
"# Select best regularization strength\n",
- "chosen_regstrength = regstrengths[np.abs(np.asarray(stats) - len(df_pEpoR['time'])).argmin()]"
+ "chosen_regstrength = regstrengths[\n",
+ " np.abs(np.asarray(stats) - len(df_pEpoR[\"time\"])).argmin()\n",
+ "]"
]
},
{
@@ -1566,8 +1885,12 @@
],
"source": [
"# Visualize the results of the multistarts for a chosen regularization strength\n",
- "ax = pypesto.visualize.waterfall(regresults[chosen_regstrength], size=[6.5, 3.5])\n",
- "ax.set_title(f\"Waterfall plot (regularization strength = {chosen_regstrength})\")\n",
+ "ax = pypesto.visualize.waterfall(\n",
+ " regresults[chosen_regstrength], size=[6.5, 3.5]\n",
+ ")\n",
+ "ax.set_title(\n",
+ " f\"Waterfall plot (regularization strength = {chosen_regstrength})\"\n",
+ ")\n",
"ax.set_ylim(ax.get_ylim()[0], 100);"
]
},
@@ -1594,15 +1917,36 @@
"# Plot ML fit for pEpoR (all regularization strengths)\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
"for regstrength in sorted(regproblems.keys()):\n",
- " t, pEpoR = simulate_pEpoR(problem=regproblems[regstrength], result=regresults[regstrength])\n",
+ " t, pEpoR = simulate_pEpoR(\n",
+ " problem=regproblems[regstrength], result=regresults[regstrength]\n",
+ " )\n",
" if regstrength == chosen_regstrength:\n",
- " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n",
+ " kwargs = dict(\n",
+ " color=\"black\",\n",
+ " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n",
+ " zorder=2,\n",
+ " )\n",
" else:\n",
- " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n",
+ " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n",
" ax.plot(t, pEpoR, **kwargs)\n",
- "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(\n",
+ " df_pEpoR[\"time\"],\n",
+ " df_pEpoR[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"pEpoR\")\n",
@@ -1637,15 +1981,36 @@
"# Plot ML fit for pSTAT5 (all regularization strengths)\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
"for regstrength in sorted(regproblems.keys()):\n",
- " t, pSTAT5 = simulate_pSTAT5(problem=regproblems[regstrength], result=regresults[regstrength])\n",
+ " t, pSTAT5 = simulate_pSTAT5(\n",
+ " problem=regproblems[regstrength], result=regresults[regstrength]\n",
+ " )\n",
" if regstrength == chosen_regstrength:\n",
- " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n",
+ " kwargs = dict(\n",
+ " color=\"black\",\n",
+ " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n",
+ " zorder=2,\n",
+ " )\n",
" else:\n",
- " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n",
+ " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n",
" ax.plot(t, pSTAT5, **kwargs)\n",
- "ax.plot(df_pSTAT5['time'], df_pSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(\n",
+ " df_pSTAT5[\"time\"],\n",
+ " df_pSTAT5[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"pSTAT5\");\n",
@@ -1675,15 +2040,36 @@
"# Plot ML fit for tSTAT5 (all regularization strengths)\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
"for regstrength in sorted(regproblems.keys()):\n",
- " t, tSTAT5 = simulate_tSTAT5(problem=regproblems[regstrength], result=regresults[regstrength])\n",
+ " t, tSTAT5 = simulate_tSTAT5(\n",
+ " problem=regproblems[regstrength], result=regresults[regstrength]\n",
+ " )\n",
" if regstrength == chosen_regstrength:\n",
- " kwargs = dict(color='black', label=f'$\\\\mathbf{{\\\\lambda = {regstrength}}}$', zorder=2)\n",
+ " kwargs = dict(\n",
+ " color=\"black\",\n",
+ " label=f\"$\\\\mathbf{{\\\\lambda = {regstrength}}}$\",\n",
+ " zorder=2,\n",
+ " )\n",
" else:\n",
- " kwargs = dict(label=f'$\\\\lambda = {regstrength}$', alpha=0.5)\n",
+ " kwargs = dict(label=f\"$\\\\lambda = {regstrength}$\", alpha=0.5)\n",
" ax.plot(t, tSTAT5, **kwargs)\n",
- "ax.plot(df_tSTAT5['time'], df_tSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(\n",
+ " df_tSTAT5[\"time\"],\n",
+ " df_tSTAT5[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"tSTAT5\");\n",
@@ -1712,13 +2098,39 @@
"source": [
"# Plot ML fit for pEpoR (single regularization strength with noise model)\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
- "t, pEpoR = simulate_pEpoR(problem=regproblems[chosen_regstrength], result=regresults[chosen_regstrength])\n",
+ "t, pEpoR = simulate_pEpoR(\n",
+ " problem=regproblems[chosen_regstrength],\n",
+ " result=regresults[chosen_regstrength],\n",
+ ")\n",
"sigma_pEpoR = 0.0274 + 0.1 * pEpoR\n",
- "ax.fill_between(t, pEpoR - 2*sigma_pEpoR, pEpoR + 2*sigma_pEpoR, color='black', alpha=0.10, interpolate=True, label='2-sigma error bands')\n",
- "ax.plot(t, pEpoR, color='black', label='MLE')\n",
- "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.fill_between(\n",
+ " t,\n",
+ " pEpoR - 2 * sigma_pEpoR,\n",
+ " pEpoR + 2 * sigma_pEpoR,\n",
+ " color=\"black\",\n",
+ " alpha=0.10,\n",
+ " interpolate=True,\n",
+ " label=\"2-sigma error bands\",\n",
+ ")\n",
+ "ax.plot(t, pEpoR, color=\"black\", label=\"MLE\")\n",
+ "ax.plot(\n",
+ " df_pEpoR[\"time\"],\n",
+ " df_pEpoR[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ylim1 = ax.get_ylim()[0]\n",
- "ax.plot(nodes, len(nodes)*[ylim1], 'x', color='black', label='spline nodes', zorder=10, clip_on=False)\n",
+ "ax.plot(\n",
+ " nodes,\n",
+ " len(nodes) * [ylim1],\n",
+ " \"x\",\n",
+ " color=\"black\",\n",
+ " label=\"spline nodes\",\n",
+ " zorder=10,\n",
+ " clip_on=False,\n",
+ ")\n",
"ax.set_ylim(ylim1, ax.get_ylim()[1])\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"pEpoR\")\n",
@@ -1736,7 +2148,10 @@
"outputs": [],
"source": [
"# Store results for later\n",
- "all_results['5 nodes'] = (regproblems[chosen_regstrength], regresults[chosen_regstrength])"
+ "all_results[\"5 nodes\"] = (\n",
+ " regproblems[chosen_regstrength],\n",
+ " regresults[chosen_regstrength],\n",
+ ")"
]
},
{
@@ -1769,10 +2184,17 @@
"source": [
"# Plot ML fit for pEpoR\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
- "for (label, (problem, result)) in all_results.items():\n",
+ "for label, (problem, result) in all_results.items():\n",
" t, pEpoR = simulate_pEpoR(problem=problem, result=result)\n",
" ax.plot(t, pEpoR, label=label)\n",
- "ax.plot(df_pEpoR['time'], df_pEpoR['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(\n",
+ " df_pEpoR[\"time\"],\n",
+ " df_pEpoR[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"pEpoR\")\n",
"ax.legend();"
@@ -1800,10 +2222,17 @@
"source": [
"# Plot ML fit for pSTAT5\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
- "for (label, (problem, result)) in all_results.items():\n",
+ "for label, (problem, result) in all_results.items():\n",
" t, pSTAT5 = simulate_pSTAT5(problem=problem, result=result)\n",
" ax.plot(t, pSTAT5, label=label)\n",
- "ax.plot(df_pSTAT5['time'], df_pSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(\n",
+ " df_pSTAT5[\"time\"],\n",
+ " df_pSTAT5[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"pSTAT5\")\n",
"ax.legend();"
@@ -1831,10 +2260,17 @@
"source": [
"# Plot ML fit for tSTAT5\n",
"fig, ax = plt.subplots(figsize=(6.5, 3.5))\n",
- "for (label, (problem, result)) in all_results.items():\n",
+ "for label, (problem, result) in all_results.items():\n",
" t, tSTAT5 = simulate_tSTAT5(problem=problem, result=result)\n",
" ax.plot(t, tSTAT5, label=label)\n",
- "ax.plot(df_tSTAT5['time'], df_tSTAT5['measurement'], 'o', color='black', markerfacecolor='none', label='experimental data')\n",
+ "ax.plot(\n",
+ " df_tSTAT5[\"time\"],\n",
+ " df_tSTAT5[\"measurement\"],\n",
+ " \"o\",\n",
+ " color=\"black\",\n",
+ " markerfacecolor=\"none\",\n",
+ " label=\"experimental data\",\n",
+ ")\n",
"ax.set_xlabel(\"time\")\n",
"ax.set_ylabel(\"tSTAT5\")\n",
"ax.legend();"
@@ -1918,14 +2354,14 @@
],
"source": [
"# Compare parameter values\n",
- "for (label, (problem, result)) in all_results.items():\n",
+ "for label, (problem, result) in all_results.items():\n",
" print(f\"\\n### {label}\")\n",
" x = result.optimize_result.x[0]\n",
" if len(x) == len(problem.x_free_indices):\n",
" names = problem.x_names[problem.x_free_indices]\n",
" else:\n",
" names = problem.x_names\n",
- " for (name, value) in zip(names, x):\n",
+ " for name, value in zip(names, x):\n",
" print(f\"{name} = {value}\")"
]
},
diff --git a/python/examples/example_steadystate/ExampleSteadystate.ipynb b/python/examples/example_steadystate/ExampleSteadystate.ipynb
index 0d9765e727..b57ed522aa 100644
--- a/python/examples/example_steadystate/ExampleSteadystate.ipynb
+++ b/python/examples/example_steadystate/ExampleSteadystate.ipynb
@@ -4,9 +4,14 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "# AMICI Python example \"steadystate\"\n",
+ "# SBML import, observation model, sensitivity analysis, data export and visualization\n",
"\n",
- "This is an example using the [model_steadystate_scaled.sbml] model to demonstrate and test SBML import and AMICI Python interface."
+ "This is an example using the [model_steadystate_scaled.sbml] model to demonstrate:\n",
+ "\n",
+ "* SBML import\n",
+ "* specifying the observation model\n",
+ "* performing sensitivity analysis\n",
+ "* exporting and visualizing simulation results"
]
},
{
@@ -16,17 +21,14 @@
"outputs": [],
"source": [
"# SBML model we want to import\n",
- "sbml_file = 'model_steadystate_scaled_without_observables.xml'\n",
+ "sbml_file = \"model_steadystate_scaled_without_observables.xml\"\n",
"# Name of the model that will also be the name of the python module\n",
- "model_name = 'model_steadystate_scaled'\n",
+ "model_name = \"model_steadystate_scaled\"\n",
"# Directory to which the generated model code is written\n",
"model_output_dir = model_name\n",
"\n",
"import libsbml\n",
- "import importlib\n",
"import amici\n",
- "import os\n",
- "import sys\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt"
]
@@ -67,18 +69,41 @@
"sbml_model = sbml_doc.getModel()\n",
"dir(sbml_doc)\n",
"\n",
- "print('Species: ', [s.getId() for s in sbml_model.getListOfSpecies()])\n",
+ "print(\"Species: \", [s.getId() for s in sbml_model.getListOfSpecies()])\n",
"\n",
- "print('\\nReactions:')\n",
+ "print(\"\\nReactions:\")\n",
"for reaction in sbml_model.getListOfReactions():\n",
- " reactants = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfReactants()])\n",
- " products = ' + '.join(['%s %s'%(int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfProducts()])\n",
- " reversible = '<' if reaction.getReversible() else ''\n",
- " print('%3s: %10s %1s->%10s\\t\\t[%s]' % (reaction.getId(),\n",
- " reactants,\n",
- " reversible,\n",
- " products,\n",
- " libsbml.formulaToL3String(reaction.getKineticLaw().getMath())))\n"
+ " reactants = \" + \".join(\n",
+ " [\n",
+ " \"%s %s\"\n",
+ " % (\n",
+ " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n",
+ " r.getSpecies(),\n",
+ " )\n",
+ " for r in reaction.getListOfReactants()\n",
+ " ]\n",
+ " )\n",
+ " products = \" + \".join(\n",
+ " [\n",
+ " \"%s %s\"\n",
+ " % (\n",
+ " int(r.getStoichiometry()) if r.getStoichiometry() > 1 else \"\",\n",
+ " r.getSpecies(),\n",
+ " )\n",
+ " for r in reaction.getListOfProducts()\n",
+ " ]\n",
+ " )\n",
+ " reversible = \"<\" if reaction.getReversible() else \"\"\n",
+ " print(\n",
+ " \"%3s: %10s %1s->%10s\\t\\t[%s]\"\n",
+ " % (\n",
+ " reaction.getId(),\n",
+ " reactants,\n",
+ " reversible,\n",
+ " products,\n",
+ " libsbml.formulaToL3String(reaction.getKineticLaw().getMath()),\n",
+ " )\n",
+ " )"
]
},
{
@@ -122,7 +147,7 @@
"metadata": {},
"outputs": [],
"source": [
- "constantParameters = ['k0']"
+ "constant_parameters = [\"k0\"]"
]
},
{
@@ -144,12 +169,12 @@
"source": [
"# Define observables\n",
"observables = {\n",
- " 'observable_x1': {'name': '', 'formula': 'x1'},\n",
- " 'observable_x2': {'name': '', 'formula': 'x2'},\n",
- " 'observable_x3': {'name': '', 'formula': 'x3'},\n",
- " 'observable_x1_scaled': {'name': '', 'formula': 'scaling_x1 * x1'},\n",
- " 'observable_x2_offsetted': {'name': '', 'formula': 'offset_x2 + x2'},\n",
- " 'observable_x1withsigma': {'name': '', 'formula': 'x1'}\n",
+ " \"observable_x1\": {\"name\": \"\", \"formula\": \"x1\"},\n",
+ " \"observable_x2\": {\"name\": \"\", \"formula\": \"x2\"},\n",
+ " \"observable_x3\": {\"name\": \"\", \"formula\": \"x3\"},\n",
+ " \"observable_x1_scaled\": {\"name\": \"\", \"formula\": \"scaling_x1 * x1\"},\n",
+ " \"observable_x2_offsetted\": {\"name\": \"\", \"formula\": \"offset_x2 + x2\"},\n",
+ " \"observable_x1withsigma\": {\"name\": \"\", \"formula\": \"x1\"},\n",
"}"
]
},
@@ -168,7 +193,7 @@
"metadata": {},
"outputs": [],
"source": [
- "sigmas = {'observable_x1withsigma': 'observable_x1withsigma_sigma'}"
+ "sigmas = {\"observable_x1withsigma\": \"observable_x1withsigma_sigma\"}"
]
},
{
@@ -312,12 +337,15 @@
],
"source": [
"import logging\n",
- "sbml_importer.sbml2amici(model_name,\n",
- " model_output_dir,\n",
- " verbose=logging.INFO,\n",
- " observables=observables,\n",
- " constant_parameters=constantParameters,\n",
- " sigmas=sigmas)"
+ "\n",
+ "sbml_importer.sbml2amici(\n",
+ " model_name,\n",
+ " model_output_dir,\n",
+ " verbose=logging.INFO,\n",
+ " observables=observables,\n",
+ " constant_parameters=constant_parameters,\n",
+ " sigmas=sigmas,\n",
+ ")"
]
},
{
@@ -326,7 +354,7 @@
"source": [
"### Importing the module and loading the model\n",
"\n",
- "If everything went well, we need to add the previously selected model output directory to our PYTHON_PATH and are then ready to load newly generated model:"
+ "If everything went well, we can now import the newly generated Python module containing our model:"
]
},
{
@@ -335,8 +363,7 @@
"metadata": {},
"outputs": [],
"source": [
- "sys.path.insert(0, os.path.abspath(model_output_dir))\n",
- "model_module = importlib.import_module(model_name)"
+ "model_module = amici.import_model_module(model_name, model_output_dir)"
]
},
{
@@ -365,7 +392,7 @@
"source": [
"model = model_module.getModel()\n",
"\n",
- "print(\"Model name:\", model.getName())\n",
+ "print(\"Model name: \", model.getName())\n",
"print(\"Model parameters:\", model.getParameterIds())\n",
"print(\"Model outputs: \", model.getObservableIds())\n",
"print(\"Model states: \", model.getStateIds())"
@@ -418,8 +445,10 @@
}
],
"source": [
- "print('Simulation was run using model default parameters as specified in the SBML model:')\n",
- "print(model.getParameters())"
+ "print(\n",
+ " \"Simulation was run using model default parameters as specified in the SBML model:\"\n",
+ ")\n",
+ "print(dict(zip(model.getParameterIds(), model.getParameters())))"
]
},
{
@@ -827,25 +856,21 @@
}
],
"source": [
- "#np.set_printoptions(threshold=8, edgeitems=2)\n",
+ "# np.set_printoptions(threshold=8, edgeitems=2)\n",
"for key, value in rdata.items():\n",
- " print('%12s: ' % key, value)"
+ " print(\"%12s: \" % key, value)"
]
},
{
"cell_type": "code",
- "execution_count": 13,
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "(1.0, 0.5, 0.4, 2.0, 0.1, 2.0, 3.0, 0.2)\n"
- ]
- }
- ],
+ "execution_count": null,
+ "outputs": [],
"source": [
- "print(model.getParameters())"
+ "# In particular for interactive use, ReturnDataView.by_id() and amici.evaluate provides a more convenient way to access slices of the result:\n",
+ "# Time trajectory of observable observable_x1\n",
+ "print(f\"{rdata.by_id('observable_x1')=}\")\n",
+ "# Time trajectory of state variable x2\n",
+ "print(f\"{rdata.by_id('x2')=}\")"
],
"metadata": {
"collapsed": false,
@@ -891,10 +916,33 @@
],
"source": [
"import amici.plotting\n",
- "amici.plotting.plotStateTrajectories(rdata, model = None)\n",
- "amici.plotting.plotObservableTrajectories(rdata, model = None)"
+ "\n",
+ "amici.plotting.plot_state_trajectories(rdata, model=None)\n",
+ "amici.plotting.plot_observable_trajectories(rdata, model=None)"
]
},
+ {
+ "cell_type": "markdown",
+ "source": [
+ "We can also evaluate symbolic expressions of model quantities using `amici.numpy.evaluate`, or directly plot the results using `amici.plotting.plot_expressions`:"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "amici.plotting.plot_expressions(\n",
+ " \"observable_x1 + observable_x2 + observable_x3\", rdata=rdata\n",
+ ")"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -934,7 +982,7 @@
"# Re-run simulation, this time passing \"experimental data\"\n",
"rdata = amici.runAmiciSimulation(model, solver, edata)\n",
"\n",
- "print('Log-likelihood %f' % rdata['llh'])"
+ "print(\"Log-likelihood %f\" % rdata[\"llh\"])"
]
},
{
@@ -967,9 +1015,13 @@
"solver.setSensitivityOrder(amici.SensitivityOrder.none)\n",
"rdata_ref = amici.runAmiciSimulation(model, solver, edata)\n",
"\n",
+ "\n",
"def get_simulation_error(solver):\n",
" rdata = amici.runAmiciSimulation(model, solver, edata)\n",
- " return np.mean(np.abs(rdata['x']-rdata_ref['x'])), np.mean(np.abs(rdata['llh']-rdata_ref['llh']))\n",
+ " return np.mean(np.abs(rdata[\"x\"] - rdata_ref[\"x\"])), np.mean(\n",
+ " np.abs(rdata[\"llh\"] - rdata_ref[\"llh\"])\n",
+ " )\n",
+ "\n",
"\n",
"def get_errors(tolfun, tols):\n",
" solver.setRelativeTolerance(1e-16)\n",
@@ -983,25 +1035,28 @@
" llh_errs.append(llh_err)\n",
" return x_errs, llh_errs\n",
"\n",
- "atols = np.logspace(-5,-15, 100)\n",
- "atol_x_errs, atol_llh_errs = get_errors('setAbsoluteTolerance', atols)\n",
"\n",
- "rtols = np.logspace(-5,-15, 100)\n",
- "rtol_x_errs, rtol_llh_errs = get_errors('setRelativeTolerance', rtols)\n",
+ "atols = np.logspace(-5, -15, 100)\n",
+ "atol_x_errs, atol_llh_errs = get_errors(\"setAbsoluteTolerance\", atols)\n",
+ "\n",
+ "rtols = np.logspace(-5, -15, 100)\n",
+ "rtol_x_errs, rtol_llh_errs = get_errors(\"setRelativeTolerance\", rtols)\n",
"\n",
"fig, axes = plt.subplots(1, 2, figsize=(15, 5))\n",
"\n",
+ "\n",
"def plot_error(tols, x_errs, llh_errs, tolname, ax):\n",
- " ax.plot(tols, x_errs, 'r-', label='x')\n",
- " ax.plot(tols, llh_errs, 'b-', label='llh')\n",
- " ax.set_xscale('log')\n",
- " ax.set_yscale('log')\n",
- " ax.set_xlabel(f'{tolname} tolerance')\n",
- " ax.set_ylabel('average numerical error')\n",
+ " ax.plot(tols, x_errs, \"r-\", label=\"x\")\n",
+ " ax.plot(tols, llh_errs, \"b-\", label=\"llh\")\n",
+ " ax.set_xscale(\"log\")\n",
+ " ax.set_yscale(\"log\")\n",
+ " ax.set_xlabel(f\"{tolname} tolerance\")\n",
+ " ax.set_ylabel(\"average numerical error\")\n",
" ax.legend()\n",
"\n",
- "plot_error(atols, atol_x_errs, atol_llh_errs, 'absolute', axes[0])\n",
- "plot_error(rtols, rtol_x_errs, rtol_llh_errs, 'relative', axes[1])\n",
+ "\n",
+ "plot_error(atols, atol_x_errs, atol_llh_errs, \"absolute\", axes[0])\n",
+ "plot_error(rtols, rtol_x_errs, rtol_llh_errs, \"relative\", axes[1])\n",
"\n",
"# reset relative tolerance to default value\n",
"solver.setRelativeTolerance(1e-8)\n",
@@ -1523,21 +1578,27 @@
"source": [
"model = model_module.getModel()\n",
"model.setTimepoints(np.linspace(0, 10, 11))\n",
- "model.requireSensitivitiesForAllParameters() # sensitivities w.r.t. all parameters\n",
+ "model.requireSensitivitiesForAllParameters() # sensitivities w.r.t. all parameters\n",
"# model.setParameterList([1, 2]) # sensitivities\n",
"# w.r.t. the specified parameters\n",
- "model.setParameterScale(amici.ParameterScaling.none) # parameters are used as-is (not log-transformed)\n",
+ "model.setParameterScale(\n",
+ " amici.ParameterScaling.none\n",
+ ") # parameters are used as-is (not log-transformed)\n",
"\n",
"solver = model.getSolver()\n",
- "solver.setSensitivityMethod(amici.SensitivityMethod.forward) # forward sensitivity analysis\n",
- "solver.setSensitivityOrder(amici.SensitivityOrder.first) # first-order sensitivities\n",
+ "solver.setSensitivityMethod(\n",
+ " amici.SensitivityMethod.forward\n",
+ ") # forward sensitivity analysis\n",
+ "solver.setSensitivityOrder(\n",
+ " amici.SensitivityOrder.first\n",
+ ") # first-order sensitivities\n",
"\n",
"rdata = amici.runAmiciSimulation(model, solver)\n",
"\n",
"# print sensitivity-related results\n",
"for key, value in rdata.items():\n",
- " if key.startswith('s'):\n",
- " print('%12s: ' % key, value)"
+ " if key.startswith(\"s\"):\n",
+ " print(\"%12s: \" % key, value)"
]
},
{
@@ -1568,13 +1629,15 @@
"# Set model options\n",
"model = model_module.getModel()\n",
"p_orig = np.array(model.getParameters())\n",
- "p_orig[list(model.getParameterIds()).index('observable_x1withsigma_sigma')] = 0.1 # Change default parameter\n",
+ "p_orig[\n",
+ " list(model.getParameterIds()).index(\"observable_x1withsigma_sigma\")\n",
+ "] = 0.1 # Change default parameter\n",
"model.setParameters(p_orig)\n",
"model.setParameterScale(amici.ParameterScaling.none)\n",
"model.setTimepoints(np.linspace(0, 10, 21))\n",
"\n",
"solver = model.getSolver()\n",
- "solver.setMaxSteps(10**4) # Set maximum number of steps for the solver\n",
+ "solver.setMaxSteps(10**4) # Set maximum number of steps for the solver\n",
"\n",
"# simulate time-course to get artificial data\n",
"rdata = amici.runAmiciSimulation(model, solver)\n",
@@ -1582,18 +1645,22 @@
"edata.fixedParameters = model.getFixedParameters()\n",
"# set sigma to 1.0 except for observable 5, so that p[7] is used instead\n",
"# (if we have sigma parameterized, the corresponding ExpData entries must NaN, otherwise they will override the parameter)\n",
- "edata.setObservedDataStdDev(rdata['t']*0+np.nan,\n",
- " list(model.getObservableIds()).index('observable_x1withsigma'))\n",
+ "edata.setObservedDataStdDev(\n",
+ " rdata[\"t\"] * 0 + np.nan,\n",
+ " list(model.getObservableIds()).index(\"observable_x1withsigma\"),\n",
+ ")\n",
"\n",
"# enable sensitivities\n",
- "solver.setSensitivityOrder(amici.SensitivityOrder.first) # First-order ...\n",
- "solver.setSensitivityMethod(amici.SensitivityMethod.adjoint) # ... adjoint sensitivities\n",
- "model.requireSensitivitiesForAllParameters() # ... w.r.t. all parameters\n",
+ "solver.setSensitivityOrder(amici.SensitivityOrder.first) # First-order ...\n",
+ "solver.setSensitivityMethod(\n",
+ " amici.SensitivityMethod.adjoint\n",
+ ") # ... adjoint sensitivities\n",
+ "model.requireSensitivitiesForAllParameters() # ... w.r.t. all parameters\n",
"\n",
"# compute adjoint sensitivities\n",
"rdata = amici.runAmiciSimulation(model, solver, edata)\n",
- "#print(rdata['sigmay'])\n",
- "print('Log-likelihood: %f\\nGradient: %s' % (rdata['llh'], rdata['sllh']))\n"
+ "# print(rdata['sigmay'])\n",
+ "print(\"Log-likelihood: %f\\nGradient: %s\" % (rdata[\"llh\"], rdata[\"sllh\"]))"
]
},
{
@@ -1657,12 +1724,13 @@
"source": [
"from scipy.optimize import check_grad\n",
"\n",
- "def func(x0, symbol='llh', x0full=None, plist=[], verbose=False):\n",
+ "\n",
+ "def func(x0, symbol=\"llh\", x0full=None, plist=[], verbose=False):\n",
" p = x0[:]\n",
" if len(plist):\n",
" p = x0full[:]\n",
" p[plist] = x0\n",
- " verbose and print('f: p=%s' % p)\n",
+ " verbose and print(\"f: p=%s\" % p)\n",
"\n",
" old_parameters = model.getParameters()\n",
" solver.setSensitivityOrder(amici.SensitivityOrder.none)\n",
@@ -1675,7 +1743,8 @@
" verbose and print(res)\n",
" return res\n",
"\n",
- "def grad(x0, symbol='llh', x0full=None, plist=[], verbose=False):\n",
+ "\n",
+ "def grad(x0, symbol=\"llh\", x0full=None, plist=[], verbose=False):\n",
" p = x0[:]\n",
" if len(plist):\n",
" model.setParameterList(plist)\n",
@@ -1683,7 +1752,7 @@
" p[plist] = x0\n",
" else:\n",
" model.requireSensitivitiesForAllParameters()\n",
- " verbose and print('g: p=%s' % p)\n",
+ " verbose and print(\"g: p=%s\" % p)\n",
"\n",
" old_parameters = model.getParameters()\n",
" solver.setSensitivityMethod(amici.SensitivityMethod.forward)\n",
@@ -1693,45 +1762,50 @@
"\n",
" model.setParameters(old_parameters)\n",
"\n",
- " res = rdata['s%s' % symbol]\n",
+ " res = rdata[\"s%s\" % symbol]\n",
" if not isinstance(res, float):\n",
" if len(res.shape) == 3:\n",
" res = np.sum(res, axis=(0, 2))\n",
" verbose and print(res)\n",
" return res\n",
"\n",
+ "\n",
"epsilon = 1e-4\n",
- "err_norm = check_grad(func, grad, p_orig, 'llh', epsilon=epsilon)\n",
- "print('sllh: |error|_2: %f' % err_norm)\n",
+ "err_norm = check_grad(func, grad, p_orig, \"llh\", epsilon=epsilon)\n",
+ "print(\"sllh: |error|_2: %f\" % err_norm)\n",
"# assert err_norm < 1e-6\n",
"print()\n",
"\n",
"for ip in range(model.np()):\n",
" plist = [ip]\n",
" p = p_orig.copy()\n",
- " err_norm = check_grad(func, grad, p[plist], 'llh', p, [ip], epsilon=epsilon)\n",
- " print('sllh: p[%d]: |error|_2: %f' % (ip, err_norm))\n",
+ " err_norm = check_grad(\n",
+ " func, grad, p[plist], \"llh\", p, [ip], epsilon=epsilon\n",
+ " )\n",
+ " print(\"sllh: p[%d]: |error|_2: %f\" % (ip, err_norm))\n",
"\n",
"print()\n",
"for ip in range(model.np()):\n",
" plist = [ip]\n",
" p = p_orig.copy()\n",
- " err_norm = check_grad(func, grad, p[plist], 'y', p, [ip], epsilon=epsilon)\n",
- " print('sy: p[%d]: |error|_2: %f' % (ip, err_norm))\n",
+ " err_norm = check_grad(func, grad, p[plist], \"y\", p, [ip], epsilon=epsilon)\n",
+ " print(\"sy: p[%d]: |error|_2: %f\" % (ip, err_norm))\n",
"\n",
"print()\n",
"for ip in range(model.np()):\n",
" plist = [ip]\n",
" p = p_orig.copy()\n",
- " err_norm = check_grad(func, grad, p[plist], 'x', p, [ip], epsilon=epsilon)\n",
- " print('sx: p[%d]: |error|_2: %f' % (ip, err_norm))\n",
+ " err_norm = check_grad(func, grad, p[plist], \"x\", p, [ip], epsilon=epsilon)\n",
+ " print(\"sx: p[%d]: |error|_2: %f\" % (ip, err_norm))\n",
"\n",
"print()\n",
"for ip in range(model.np()):\n",
" plist = [ip]\n",
" p = p_orig.copy()\n",
- " err_norm = check_grad(func, grad, p[plist], 'sigmay', p, [ip], epsilon=epsilon)\n",
- " print('ssigmay: p[%d]: |error|_2: %f' % (ip, err_norm))\n"
+ " err_norm = check_grad(\n",
+ " func, grad, p[plist], \"sigmay\", p, [ip], epsilon=epsilon\n",
+ " )\n",
+ " print(\"ssigmay: p[%d]: |error|_2: %f\" % (ip, err_norm))"
]
},
{
@@ -1742,45 +1816,56 @@
},
"outputs": [],
"source": [
- "eps=1e-4\n",
- "op=model.getParameters()\n",
+ "eps = 1e-4\n",
+ "op = model.getParameters()\n",
"\n",
"\n",
- "solver.setSensitivityMethod(amici.SensitivityMethod.forward) # forward sensitivity analysis\n",
- "solver.setSensitivityOrder(amici.SensitivityOrder.first) # first-order sensitivities\n",
+ "solver.setSensitivityMethod(\n",
+ " amici.SensitivityMethod.forward\n",
+ ") # forward sensitivity analysis\n",
+ "solver.setSensitivityOrder(\n",
+ " amici.SensitivityOrder.first\n",
+ ") # first-order sensitivities\n",
"model.requireSensitivitiesForAllParameters()\n",
"solver.setRelativeTolerance(1e-12)\n",
"rdata = amici.runAmiciSimulation(model, solver, edata)\n",
"\n",
- "def fd(x0, ip, eps, symbol='llh'):\n",
+ "\n",
+ "def fd(x0, ip, eps, symbol=\"llh\"):\n",
" p = list(x0[:])\n",
" old_parameters = model.getParameters()\n",
" solver.setSensitivityOrder(amici.SensitivityOrder.none)\n",
- " p[ip]+=eps\n",
+ " p[ip] += eps\n",
" model.setParameters(p)\n",
" rdata_f = amici.runAmiciSimulation(model, solver, edata)\n",
- " p[ip]-=2*eps\n",
+ " p[ip] -= 2 * eps\n",
" model.setParameters(p)\n",
" rdata_b = amici.runAmiciSimulation(model, solver, edata)\n",
"\n",
" model.setParameters(old_parameters)\n",
- " return (rdata_f[symbol]-rdata_b[symbol])/(2*eps)\n",
+ " return (rdata_f[symbol] - rdata_b[symbol]) / (2 * eps)\n",
+ "\n",
"\n",
"def plot_sensitivities(symbol, eps):\n",
- " fig, axes = plt.subplots(4,2, figsize=(15,10))\n",
+ " fig, axes = plt.subplots(4, 2, figsize=(15, 10))\n",
" for ip in range(4):\n",
" fd_approx = fd(model.getParameters(), ip, eps, symbol=symbol)\n",
"\n",
- " axes[ip,0].plot(edata.getTimepoints(), rdata[f's{symbol}'][:,ip,:], 'r-')\n",
- " axes[ip,0].plot(edata.getTimepoints(), fd_approx, 'k--')\n",
- " axes[ip,0].set_ylabel(f'sensitivity {symbol}')\n",
- " axes[ip,0].set_xlabel('time')\n",
- "\n",
- "\n",
- " axes[ip,1].plot(edata.getTimepoints(), np.abs(rdata[f's{symbol}'][:,ip,:]-fd_approx), 'k-')\n",
- " axes[ip,1].set_ylabel('difference to fd')\n",
- " axes[ip,1].set_xlabel('time')\n",
- " axes[ip,1].set_yscale('log')\n",
+ " axes[ip, 0].plot(\n",
+ " edata.getTimepoints(), rdata[f\"s{symbol}\"][:, ip, :], \"r-\"\n",
+ " )\n",
+ " axes[ip, 0].plot(edata.getTimepoints(), fd_approx, \"k--\")\n",
+ " axes[ip, 0].set_ylabel(f\"sensitivity {symbol}\")\n",
+ " axes[ip, 0].set_xlabel(\"time\")\n",
+ "\n",
+ " axes[ip, 1].plot(\n",
+ " edata.getTimepoints(),\n",
+ " np.abs(rdata[f\"s{symbol}\"][:, ip, :] - fd_approx),\n",
+ " \"k-\",\n",
+ " )\n",
+ " axes[ip, 1].set_ylabel(\"difference to fd\")\n",
+ " axes[ip, 1].set_xlabel(\"time\")\n",
+ " axes[ip, 1].set_yscale(\"log\")\n",
"\n",
" plt.tight_layout()\n",
" plt.show()"
@@ -1803,7 +1888,7 @@
}
],
"source": [
- "plot_sensitivities('x', eps)"
+ "plot_sensitivities(\"x\", eps)"
]
},
{
@@ -1823,7 +1908,7 @@
}
],
"source": [
- "plot_sensitivities('y', eps)"
+ "plot_sensitivities(\"y\", eps)"
]
},
{
@@ -1937,7 +2022,7 @@
],
"source": [
"# look at the States in rdata as DataFrame\n",
- "amici.getSimulationStatesAsDataFrame(model, [edata], [rdata])\n"
+ "amici.getSimulationStatesAsDataFrame(model, [edata], [rdata])"
]
}
],
@@ -1971,6 +2056,9 @@
"toc_position": {},
"toc_section_display": true,
"toc_window_display": false
+ },
+ "nbsphinx": {
+ "execute": "always"
}
},
"nbformat": 4,
diff --git a/python/sdist/amici/__init__.py b/python/sdist/amici/__init__.py
index 7160acb475..bcb7387fbf 100644
--- a/python/sdist/amici/__init__.py
+++ b/python/sdist/amici/__init__.py
@@ -66,9 +66,9 @@ def _imported_from_setup() -> bool:
# requires the AMICI extension during its installation, but seems
# unlikely...
frame_path = os.path.realpath(os.path.expanduser(frame.filename))
- if frame_path == os.path.join(package_root, "setup.py") or frame_path.endswith(
- f"{sep}setuptools{sep}build_meta.py"
- ):
+ if frame_path == os.path.join(
+ package_root, "setup.py"
+ ) or frame_path.endswith(f"{sep}setuptools{sep}build_meta.py"):
return True
return False
@@ -203,6 +203,8 @@ def _get_default_argument(func: Callable, arg: str) -> Any:
import inspect
signature = inspect.signature(func)
- if (default := signature.parameters[arg].default) is not inspect.Parameter.empty:
+ if (
+ default := signature.parameters[arg].default
+ ) is not inspect.Parameter.empty:
return default
raise ValueError(f"No default value for argument {arg} of {func}.")
diff --git a/python/sdist/amici/__main__.py b/python/sdist/amici/__main__.py
index b8fbc77c0f..165f5d9516 100644
--- a/python/sdist/amici/__main__.py
+++ b/python/sdist/amici/__main__.py
@@ -21,7 +21,9 @@ def print_info():
if hdf5_enabled:
features.append("HDF5")
- print(f"AMICI ({sys.platform}) version {__version__} ({','.join(features)})")
+ print(
+ f"AMICI ({sys.platform}) version {__version__} ({','.join(features)})"
+ )
if __name__ == "__main__":
diff --git a/python/sdist/amici/conserved_quantities_demartino.py b/python/sdist/amici/conserved_quantities_demartino.py
index c579558f71..5b04fa1479 100644
--- a/python/sdist/amici/conserved_quantities_demartino.py
+++ b/python/sdist/amici/conserved_quantities_demartino.py
@@ -60,7 +60,9 @@ def compute_moiety_conservation_laws(
if not done:
# construct interaction matrix
- J, J2, fields = _fill(stoichiometric_list, engaged_species, num_species)
+ J, J2, fields = _fill(
+ stoichiometric_list, engaged_species, num_species
+ )
# seed random number generator
if rng_seed is not False:
@@ -87,7 +89,10 @@ def compute_moiety_conservation_laws(
if timer == max_num_monte_carlo:
done = _relax(
- stoichiometric_list, conserved_moieties, num_reactions, num_species
+ stoichiometric_list,
+ conserved_moieties,
+ num_reactions,
+ num_species,
)
timer = 0
_reduce(int_kernel_dim, cls_species_idxs, cls_coefficients, num_species)
@@ -139,14 +144,23 @@ def log(*args, **kwargs):
if not engaged_species_idxs:
continue
log(
- f"Moiety number {i + 1} engages {len(engaged_species_idxs)} " "species:"
+ f"Moiety number {i + 1} engages {len(engaged_species_idxs)} "
+ "species:"
)
- for species_idx, coefficient in zip(engaged_species_idxs, coefficients):
- name = species_names[species_idx] if species_names else species_idx
+ for species_idx, coefficient in zip(
+ engaged_species_idxs, coefficients
+ ):
+ name = (
+ species_names[species_idx]
+ if species_names
+ else species_idx
+ )
log(f"\t{name}\t{coefficient}")
-def _qsort(k: int, km: int, order: MutableSequence[int], pivots: Sequence[int]) -> None:
+def _qsort(
+ k: int, km: int, order: MutableSequence[int], pivots: Sequence[int]
+) -> None:
"""Quicksort
Recursive implementation of the quicksort algorithm
@@ -230,7 +244,9 @@ def _kernel(
matrix2[i].append(1)
order: List[int] = list(range(num_species))
- pivots = [matrix[i][0] if len(matrix[i]) else _MAX for i in range(num_species)]
+ pivots = [
+ matrix[i][0] if len(matrix[i]) else _MAX for i in range(num_species)
+ ]
done = False
while not done:
@@ -241,7 +257,8 @@ def _kernel(
if len(matrix[order[j]]) > 1:
for i in range(len(matrix[order[j]])):
min1 = min(
- min1, abs(matrix2[order[j]][0] / matrix2[order[j]][i])
+ min1,
+ abs(matrix2[order[j]][0] / matrix2[order[j]][i]),
)
min2 = _MAX
@@ -249,7 +266,10 @@ def _kernel(
for i in range(len(matrix[order[j + 1]])):
min2 = min(
min2,
- abs(matrix2[order[j + 1]][0] / matrix2[order[j + 1]][i]),
+ abs(
+ matrix2[order[j + 1]][0]
+ / matrix2[order[j + 1]][i]
+ ),
)
if min2 > min1:
@@ -289,7 +309,9 @@ def _kernel(
kernel_dim = 0
for i in range(num_species):
- done = all(matrix[i][j] >= num_reactions for j in range(len(matrix[i])))
+ done = all(
+ matrix[i][j] >= num_reactions for j in range(len(matrix[i]))
+ )
if done and len(matrix[i]):
for j in range(len(matrix[i])):
RSolutions[kernel_dim].append(matrix[i][j] - num_reactions)
@@ -330,7 +352,8 @@ def _kernel(
assert int_kernel_dim <= kernel_dim
assert len(cls_species_idxs) == len(cls_coefficients), (
- "Inconsistent number of conserved quantities in coefficients and " "species"
+ "Inconsistent number of conserved quantities in coefficients and "
+ "species"
)
return (
kernel_dim,
@@ -343,7 +366,9 @@ def _kernel(
def _fill(
- stoichiometric_list: Sequence[float], matched: Sequence[int], num_species: int
+ stoichiometric_list: Sequence[float],
+ matched: Sequence[int],
+ num_species: int,
) -> Tuple[List[List[int]], List[List[int]], List[int]]:
"""Construct interaction matrix
@@ -460,14 +485,18 @@ def _is_linearly_dependent(
if len(matrix[order[j]]) > 1:
for i in range(len(matrix[order[j]])):
min1 = min(
- min1, abs(matrix2[order[j]][0] / matrix2[order[j]][i])
+ min1,
+ abs(matrix2[order[j]][0] / matrix2[order[j]][i]),
)
min2 = _MAX
if len(matrix[order[j + 1]]) > 1:
for i in range(len(matrix[order[j + 1]])):
min2 = min(
min2,
- abs(matrix2[order[j + 1]][0] / matrix2[order[j + 1]][i]),
+ abs(
+ matrix2[order[j + 1]][0]
+ / matrix2[order[j + 1]][i]
+ ),
)
if min2 > min1:
# swap
@@ -549,7 +578,9 @@ def _monte_carlo(
 considered, otherwise the algorithm retries Monte Carlo up to max_iter
"""
dim = len(matched)
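+ # random 0/1 initial coefficients for species with interactions, 0 otherwise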
- num = [int(2 * random.uniform(0, 1)) if len(J[i]) else 0 for i in range(dim)]
+ num = [
+ int(2 * random.uniform(0, 1)) if len(J[i]) else 0 for i in range(dim)
+ ]
numtot = sum(num)
def compute_h():
@@ -611,7 +642,12 @@ def compute_h():
# founds MCLS? need to check for linear independence
if len(int_matched) and not _is_linearly_dependent(
- num, int_kernel_dim, cls_species_idxs, cls_coefficients, matched, num_species
+ num,
+ int_kernel_dim,
+ cls_species_idxs,
+ cls_coefficients,
+ matched,
+ num_species,
):
logger.debug("Found a moiety but it is linearly dependent... next.")
return False, int_kernel_dim, int_matched
@@ -708,14 +744,18 @@ def _relax(
if len(matrix[order[j]]) > 1:
for i in range(len(matrix[order[j]])):
min1 = min(
- min1, abs(matrix2[order[j]][0] / matrix2[order[j]][i])
+ min1,
+ abs(matrix2[order[j]][0] / matrix2[order[j]][i]),
)
min2 = _MAX
if len(matrix[order[j + 1]]) > 1:
for i in range(len(matrix[order[j + 1]])):
min2 = min(
min2,
- abs(matrix2[order[j + 1]][0] / matrix2[order[j + 1]][i]),
+ abs(
+ matrix2[order[j + 1]][0]
+ / matrix2[order[j + 1]][i]
+ ),
)
if min2 > min1:
# swap
@@ -774,7 +814,9 @@ def _relax(
row_k[matrix[j][a]] -= matrix2[j][a] * matrix2[k][i]
# filter
matrix[k] = [
- row_idx for row_idx, row_val in enumerate(row_k) if row_val != 0
+ row_idx
+ for row_idx, row_val in enumerate(row_k)
+ if row_val != 0
]
matrix2[k] = [row_val for row_val in row_k if row_val != 0]
diff --git a/python/sdist/amici/custom_commands.py b/python/sdist/amici/custom_commands.py
index 2e69800fc7..46abfe3290 100644
--- a/python/sdist/amici/custom_commands.py
+++ b/python/sdist/amici/custom_commands.py
@@ -126,6 +126,16 @@ def run(self):
class AmiciBuildCMakeExtension(BuildExtension):
+ def finalize_options(self):
+ # Allow overriding the setuptools/distutils temporary build directory,
+ # which is randomly named since setuptools version 64, via an
+ # environment variable. This is useful for CI builds where we need the
+ # files in this directory for code coverage analysis.
+ if os.getenv("AMICI_BUILD_TEMP"):
+ self.build_temp = os.getenv("AMICI_BUILD_TEMP")
+
+ super().finalize_options()
+
def run(self):
"""Copy the generated clibs to the extensions folder to be included in
the wheel
@@ -171,7 +181,8 @@ def build_extension(self, ext: CMakeExtension) -> None:
build_dir = self.build_lib if self.inplace == 0 else os.getcwd()
build_dir = Path(build_dir).absolute().as_posix()
ext.cmake_configure_options = [
- x.replace("${build_dir}", build_dir) for x in ext.cmake_configure_options
+ x.replace("${build_dir}", build_dir)
+ for x in ext.cmake_configure_options
]
super().build_extension(ext)
diff --git a/python/sdist/amici/cxxcodeprinter.py b/python/sdist/amici/cxxcodeprinter.py
index 3055518c5b..e6e377b331 100644
--- a/python/sdist/amici/cxxcodeprinter.py
+++ b/python/sdist/amici/cxxcodeprinter.py
@@ -68,7 +68,9 @@ def doprint(self, expr: sp.Expr, assign_to: Optional[str] = None) -> str:
def _print_min_max(self, expr, cpp_fun: str, sympy_fun):
# C++ doesn't like mixing int and double for arguments for min/max,
# therefore, we just always convert to float
- arg0 = sp.Float(expr.args[0]) if expr.args[0].is_number else expr.args[0]
+ arg0 = (
+ sp.Float(expr.args[0]) if expr.args[0].is_number else expr.args[0]
+ )
if len(expr.args) == 1:
return self._print(arg0)
return "%s%s(%s, %s)" % (
@@ -108,13 +110,18 @@ def _get_sym_lines_array(
C++ code as list of lines
"""
return [
- " " * indent_level + f"{variable}[{index}] = " f"{self.doprint(math)};"
+ " " * indent_level + f"{variable}[{index}] = "
+ f"{self.doprint(math)};"
for index, math in enumerate(equations)
if math not in [0, 0.0]
]
def _get_sym_lines_symbols(
- self, symbols: sp.Matrix, equations: sp.Matrix, variable: str, indent_level: int
+ self,
+ symbols: sp.Matrix,
+ equations: sp.Matrix,
+ variable: str,
+ indent_level: int,
) -> List[str]:
"""
Generate C++ code for where array elements are directly replaced with
@@ -146,7 +153,9 @@ def format_regular_line(symbol, math, index):
if self.extract_cse:
# Extract common subexpressions
cse_sym_prefix = "__amici_cse_"
- symbol_generator = numbered_symbols(cls=sp.Symbol, prefix=cse_sym_prefix)
+ symbol_generator = numbered_symbols(
+ cls=sp.Symbol, prefix=cse_sym_prefix
+ )
replacements, reduced_exprs = sp.cse(
equations,
symbols=symbol_generator,
@@ -162,7 +171,9 @@ def format_regular_line(symbol, math, index):
sorted_symbols = toposort(
{
identifier: {
- s for s in definition.free_symbols if s in expr_dict
+ s
+ for s in definition.free_symbols
+ if s in expr_dict
}
for (identifier, definition) in expr_dict.items()
}
@@ -178,7 +189,9 @@ def format_line(symbol: sp.Symbol):
f"= {self.doprint(math)};"
)
elif math not in [0, 0.0]:
- return format_regular_line(symbol, math, symbol_to_idx[symbol])
+ return format_regular_line(
+ symbol, math, symbol_to_idx[symbol]
+ )
return [
line
@@ -247,7 +260,9 @@ def csc_matrix(
symbol_row_vals.append(row)
idx += 1
- symbol_name = f"d{rownames[row].name}" f"_d{colnames[col].name}"
+ symbol_name = (
+ f"d{rownames[row].name}" f"_d{colnames[col].name}"
+ )
if identifier:
symbol_name += f"_{identifier}"
symbol_list.append(symbol_name)
@@ -267,7 +282,13 @@ def csc_matrix(
else:
sparse_list = sp.Matrix(sparse_list)
- return symbol_col_ptrs, symbol_row_vals, sparse_list, symbol_list, sparse_matrix
+ return (
+ symbol_col_ptrs,
+ symbol_row_vals,
+ sparse_list,
+ symbol_list,
+ sparse_matrix,
+ )
@staticmethod
def print_bool(expr) -> str:
diff --git a/python/sdist/amici/de_export.py b/python/sdist/amici/de_export.py
index 1b46b5be84..b1fa02c421 100644
--- a/python/sdist/amici/de_export.py
+++ b/python/sdist/amici/de_export.py
@@ -27,12 +27,12 @@
Callable,
Dict,
List,
+ Literal,
Optional,
Sequence,
Set,
Tuple,
Union,
- Literal
)
import numpy as np
@@ -70,9 +70,13 @@
# Template for model simulation main.cpp file
CXX_MAIN_TEMPLATE_FILE = os.path.join(amiciSrcPath, "main.template.cpp")
# Template for model/swig/CMakeLists.txt
-SWIG_CMAKE_TEMPLATE_FILE = os.path.join(amiciSwigPath, "CMakeLists_model.cmake")
+SWIG_CMAKE_TEMPLATE_FILE = os.path.join(
+ amiciSwigPath, "CMakeLists_model.cmake"
+)
# Template for model/CMakeLists.txt
-MODEL_CMAKE_TEMPLATE_FILE = os.path.join(amiciSrcPath, "CMakeLists.template.cmake")
+MODEL_CMAKE_TEMPLATE_FILE = os.path.join(
+ amiciSrcPath, "CMakeLists.template.cmake"
+)
IDENTIFIER_PATTERN = re.compile(r"^[a-zA-Z_]\w*$")
DERIVATIVE_PATTERN = re.compile(r"^d(x_rdata|xdot|\w+?)d(\w+?)(?:_explicit)?$")
@@ -285,7 +289,8 @@ def arguments(self, ode: bool = True) -> str:
" const realtype *k, const int ip",
),
"sigmaz": _FunctionInfo(
- "realtype *sigmaz, const realtype t, const realtype *p, " "const realtype *k",
+ "realtype *sigmaz, const realtype t, const realtype *p, "
+ "const realtype *k",
),
"sroot": _FunctionInfo(
"realtype *stau, const realtype t, const realtype *x, "
@@ -326,7 +331,8 @@ def arguments(self, ode: bool = True) -> str:
assume_pow_positivity=True,
),
"x0": _FunctionInfo(
- "realtype *x0, const realtype t, const realtype *p, " "const realtype *k"
+ "realtype *x0, const realtype t, const realtype *p, "
+ "const realtype *k"
),
"x0_fixedParameters": _FunctionInfo(
"realtype *x0_fixedParameters, const realtype t, "
@@ -938,7 +944,9 @@ def states(self) -> List[State]:
@log_execution_time("importing SbmlImporter", logger)
def import_from_sbml_importer(
- self, si: "sbml_import.SbmlImporter", compute_cls: Optional[bool] = True
+ self,
+ si: "sbml_import.SbmlImporter",
+ compute_cls: Optional[bool] = True,
) -> None:
"""
Imports a model specification from a
@@ -1013,15 +1021,21 @@ def transform_dxdt_to_concentration(species_id, dxdt):
# we need to flatten out assignments in the compartment in
# order to ensure that we catch all species dependencies
- v = smart_subs_dict(v, si.symbols[SymbolId.EXPRESSION], "value")
+ v = smart_subs_dict(
+ v, si.symbols[SymbolId.EXPRESSION], "value"
+ )
dv_dt = v.diff(amici_time_symbol)
# we may end up with a time derivative of the compartment
# volume due to parameter rate rules
comp_rate_vars = [
- p for p in v.free_symbols if p in si.symbols[SymbolId.SPECIES]
+ p
+ for p in v.free_symbols
+ if p in si.symbols[SymbolId.SPECIES]
]
for var in comp_rate_vars:
- dv_dt += v.diff(var) * si.symbols[SymbolId.SPECIES][var]["dt"]
+ dv_dt += (
+ v.diff(var) * si.symbols[SymbolId.SPECIES][var]["dt"]
+ )
dv_dx = v.diff(species_id)
xdot = (dxdt - dv_dt * species_id) / (dv_dx * species_id + v)
return xdot
@@ -1040,7 +1054,9 @@ def transform_dxdt_to_concentration(species_id, dxdt):
return dxdt / v
# create dynamics without respecting conservation laws first
- dxdt = smart_multiply(si.stoichiometric_matrix, MutableDenseMatrix(fluxes))
+ dxdt = smart_multiply(
+ si.stoichiometric_matrix, MutableDenseMatrix(fluxes)
+ )
for ix, ((species_id, species), formula) in enumerate(
zip(symbols[SymbolId.SPECIES].items(), dxdt)
):
@@ -1050,7 +1066,9 @@ def transform_dxdt_to_concentration(species_id, dxdt):
if species["amount"]:
species["dt"] = formula
else:
- species["dt"] = transform_dxdt_to_concentration(species_id, formula)
+ species["dt"] = transform_dxdt_to_concentration(
+ species_id, formula
+ )
# create all basic components of the DE model and add them.
for symbol_name in symbols:
@@ -1098,11 +1116,14 @@ def transform_dxdt_to_concentration(species_id, dxdt):
# fill in 'self._sym' based on prototypes and components in ode_model
self.generate_basic_variables()
self._has_quadratic_nllh = all(
- llh["dist"] in ["normal", "lin-normal", "log-normal", "log10-normal"]
+ llh["dist"]
+ in ["normal", "lin-normal", "log-normal", "log10-normal"]
for llh in si.symbols[SymbolId.LLHY].values()
)
- self._process_sbml_rate_of(symbols) # substitute SBML-rateOf constructs
+ self._process_sbml_rate_of(
+ symbols
+ ) # substitute SBML-rateOf constructs
def _process_sbml_rate_of(self, symbols) -> None:
"""Substitute any SBML-rateOf constructs in the model equations"""
@@ -1142,7 +1163,10 @@ def get_rate(symbol: sp.Symbol):
for i_state in range(len(self.eq("x0"))):
if rate_ofs := self._eqs["x0"][i_state].find(rate_of_func):
self._eqs["x0"][i_state] = self._eqs["x0"][i_state].subs(
- {rate_of: get_rate(rate_of.args[0]) for rate_of in rate_ofs}
+ {
+ rate_of: get_rate(rate_of.args[0])
+ for rate_of in rate_ofs
+ }
)
for component in chain(
@@ -1169,7 +1193,10 @@ def get_rate(symbol: sp.Symbol):
component.set_val(
component.get_val().subs(
- {rate_of: get_rate(rate_of.args[0]) for rate_of in rate_ofs}
+ {
+ rate_of: get_rate(rate_of.args[0])
+ for rate_of in rate_ofs
+ }
)
)
@@ -1265,14 +1292,21 @@ def add_conservation_law(
)[0]
except StopIteration:
raise ValueError(
- f"Specified state {state} was not found in the " f"model states."
+ f"Specified state {state} was not found in the "
+ f"model states."
)
state_id = self._differential_states[ix].get_id()
# \sum_{i≠j}(a_i * x_i)/a_j
target_expression = (
- sp.Add(*(c_i * x_i for x_i, c_i in coefficients.items() if x_i != state))
+ sp.Add(
+ *(
+ c_i * x_i
+ for x_i, c_i in coefficients.items()
+ if x_i != state
+ )
+ )
/ coefficients[state]
)
@@ -1469,7 +1503,9 @@ def sparseeq(self, name) -> sp.Matrix:
self._generate_sparse_symbol(name)
return self._sparseeqs[name]
- def colptrs(self, name: str) -> Union[List[sp.Number], List[List[sp.Number]]]:
+ def colptrs(
+ self, name: str
+ ) -> Union[List[sp.Number], List[List[sp.Number]]]:
"""
Returns (and constructs if necessary) the column pointers for
a sparsified symbolic variable.
@@ -1486,7 +1522,9 @@ def colptrs(self, name: str) -> Union[List[sp.Number], List[List[sp.Number]]]:
self._generate_sparse_symbol(name)
return self._colptrs[name]
- def rowvals(self, name: str) -> Union[List[sp.Number], List[List[sp.Number]]]:
+ def rowvals(
+ self, name: str
+ ) -> Union[List[sp.Number], List[List[sp.Number]]]:
"""
Returns (and constructs if necessary) the row values for a
sparsified symbolic variable.
@@ -1554,7 +1592,9 @@ def _generate_symbol(self, name: str) -> None:
"""
if name in self._variable_prototype:
components = self._variable_prototype[name]()
- self._syms[name] = sp.Matrix([comp.get_id() for comp in components])
+ self._syms[name] = sp.Matrix(
+ [comp.get_id() for comp in components]
+ )
if name == "y":
self._syms["my"] = sp.Matrix(
[comp.get_measurement_symbol() for comp in components]
@@ -1738,7 +1778,9 @@ def get_appearance_counts(self, idxs: List[int]) -> List[int]:
return [
free_symbols_dt.count(str(self._differential_states[idx].get_id()))
- + free_symbols_expr.count(str(self._differential_states[idx].get_id()))
+ + free_symbols_expr.count(
+ str(self._differential_states[idx].get_id())
+ )
for idx in idxs
]
@@ -1823,7 +1865,9 @@ def _compute_equation(self, name: str) -> None:
time_symbol = sp.Matrix([amici_time_symbol])
if name in self._equation_prototype:
- self._equation_from_components(name, self._equation_prototype[name]())
+ self._equation_from_components(
+ name, self._equation_prototype[name]()
+ )
elif name in self._total_derivative_prototypes:
args = self._total_derivative_prototypes[name]
@@ -1913,7 +1957,9 @@ def _compute_equation(self, name: str) -> None:
if any(sym in eq.free_symbols for sym in k)
]
eq = self.eq("x0")
- self._eqs[name] = sp.Matrix([eq[ix] for ix in self._x0_fixedParameters_idx])
+ self._eqs[name] = sp.Matrix(
+ [eq[ix] for ix in self._x0_fixedParameters_idx]
+ )
elif name == "dtotal_cldx_rdata":
x_rdata = self.sym("x_rdata")
@@ -1926,7 +1972,9 @@ def _compute_equation(self, name: str) -> None:
elif name == "dtcldx":
# this is always zero
- self._eqs[name] = sp.zeros(self.num_cons_law(), self.num_states_solver())
+ self._eqs[name] = sp.zeros(
+ self.num_cons_law(), self.num_states_solver()
+ )
elif name == "dtcldp":
# force symbols
@@ -1951,15 +1999,21 @@ def _compute_equation(self, name: str) -> None:
elif name == "dx_rdatadp":
if self.num_cons_law():
- self._eqs[name] = smart_jacobian(self.eq("x_rdata"), self.sym("p"))
+ self._eqs[name] = smart_jacobian(
+ self.eq("x_rdata"), self.sym("p")
+ )
else:
# so far, dx_rdatadp is only required for sx_rdata
# in case of no conservation laws, C++ code will directly use
# sx, we don't need this
- self._eqs[name] = sp.zeros(self.num_states_rdata(), self.num_par())
+ self._eqs[name] = sp.zeros(
+ self.num_states_rdata(), self.num_par()
+ )
elif name == "dx_rdatadtcl":
- self._eqs[name] = smart_jacobian(self.eq("x_rdata"), self.sym("tcl"))
+ self._eqs[name] = smart_jacobian(
+ self.eq("x_rdata"), self.sym("tcl")
+ )
elif name == "dxdotdx_explicit":
# force symbols
@@ -2022,7 +2076,9 @@ def _compute_equation(self, name: str) -> None:
self._eqs[name] = event_eqs
elif name == "z":
- event_observables = [sp.zeros(self.num_eventobs(), 1) for _ in self._events]
+ event_observables = [
+ sp.zeros(self.num_eventobs(), 1) for _ in self._events
+ ]
event_ids = [e.get_id() for e in self._events]
# TODO: get rid of this stupid 1-based indexing as soon as we can
# the matlab interface
@@ -2030,7 +2086,9 @@ def _compute_equation(self, name: str) -> None:
event_ids.index(event_obs.get_event()) + 1
for event_obs in self._event_observables
]
- for (iz, ie), event_obs in zip(enumerate(z2event), self._event_observables):
+ for (iz, ie), event_obs in zip(
+ enumerate(z2event), self._event_observables
+ ):
event_observables[ie - 1][iz] = event_obs.get_val()
self._eqs[name] = event_observables
@@ -2048,7 +2106,10 @@ def _compute_equation(self, name: str) -> None:
]
if name == "dzdx":
for ie in range(self.num_events()):
- dtaudx = -self.eq("drootdx")[ie, :] / self.eq("drootdt_total")[ie]
+ dtaudx = (
+ -self.eq("drootdx")[ie, :]
+ / self.eq("drootdt_total")[ie]
+ )
for iz in range(self.num_eventobs()):
if ie != self._z2event[iz] - 1:
continue
@@ -2119,7 +2180,9 @@ def _compute_equation(self, name: str) -> None:
)
# finish chain rule for the state variables
- tmp_eq += smart_multiply(self.eq("ddeltaxdx")[ie], tmp_dxdp)
+ tmp_eq += smart_multiply(
+ self.eq("ddeltaxdx")[ie], tmp_dxdp
+ )
event_eqs.append(tmp_eq)
@@ -2138,7 +2201,9 @@ def _compute_equation(self, name: str) -> None:
# that we need to reverse the order here
for cl in reversed(self._conservation_laws)
]
- ).col_join(smart_jacobian(self.eq("w")[self.num_cons_law() :, :], x))
+ ).col_join(
+ smart_jacobian(self.eq("w")[self.num_cons_law() :, :], x)
+ )
elif match_deriv:
self._derivative(match_deriv[1], match_deriv[2], name)
@@ -2212,7 +2277,10 @@ def _derivative(self, eq: str, var: str, name: str = None) -> None:
and cv not in self._lock_total_derivative
and var != cv
and min(self.sym(cv).shape)
- and ((eq, var) not in ignore_chainrule or ignore_chainrule[(eq, var)] != cv)
+ and (
+ (eq, var) not in ignore_chainrule
+ or ignore_chainrule[(eq, var)] != cv
+ )
]
if len(chainvars):
self._lock_total_derivative += chainvars
@@ -2315,9 +2383,14 @@ def _total_derivative(
dxdz = self.sym_or_eq(name, dxdz_name)
# Save time for large models if one multiplicand is zero,
# which is not checked for by sympy
- if not smart_is_zero_matrix(dydx) and not smart_is_zero_matrix(dxdz):
+ if not smart_is_zero_matrix(dydx) and not smart_is_zero_matrix(
+ dxdz
+ ):
dydx_times_dxdz = smart_multiply(dydx, dxdz)
- if dxdz.shape[1] == 1 and self._eqs[name].shape[1] != dxdz.shape[1]:
+ if (
+ dxdz.shape[1] == 1
+ and self._eqs[name].shape[1] != dxdz.shape[1]
+ ):
for iz in range(self._eqs[name].shape[1]):
self._eqs[name][:, iz] += dydx_times_dxdz
else:
@@ -2343,7 +2416,9 @@ def sym_or_eq(self, name: str, varname: str) -> sp.Matrix:
# within a column may differ from the initialization of symbols here,
# so those are not safe to use. Not removing them from signature as
# this would break backwards compatibility.
- if var_in_function_signature(name, varname, self.is_ode()) and varname not in [
+ if var_in_function_signature(
+ name, varname, self.is_ode()
+ ) and varname not in [
"dwdx",
"dwdp",
]:
@@ -2467,7 +2542,8 @@ def state_has_fixed_parameter_initial_condition(self, ix: int) -> bool:
if not isinstance(ic, sp.Basic):
return False
return any(
- fp in (c.get_id() for c in self._constants) for fp in ic.free_symbols
+ fp in (c.get_id() for c in self._constants)
+ for fp in ic.free_symbols
)
def state_has_conservation_law(self, ix: int) -> bool:
@@ -2692,7 +2768,8 @@ class DEExporter:
due to numerical errors
:ivar compiler:
- distutils/setuptools compiler selection to build the Python extension
+ Absolute path to the compiler executable to be used to build the Python
+ extension, e.g. ``/usr/bin/clang``.
:ivar functions:
carries C++ function signatures and other specifications
@@ -2755,8 +2832,8 @@ def __init__(
used to avoid problems with state variables that may become
negative due to numerical errors
- :param compiler: distutils/setuptools compiler selection to build the
- python extension
+ :param compiler: Absolute path to the compiler executable to be used
+ to build the Python extension, e.g. ``/usr/bin/clang``.
:param allow_reinit_fixpar_initcond:
see :class:`amici.de_export.DEExporter`
@@ -2784,7 +2861,9 @@ def __init__(
# include/amici/model.h for details)
self.model: DEModel = de_model
self.model._code_printer.known_functions.update(
- splines.spline_user_functions(self.model.splines, self._get_index("p"))
+ splines.spline_user_functions(
+ self.model.splines, self._get_index("p")
+ )
)
# To only generate a subset of functions, apply subselection here
@@ -2800,7 +2879,9 @@ def generate_model_code(self) -> None:
Generates the native C++ code for the loaded model and a Matlab
script that can be run to compile a mex file from the C++ code
"""
- with _monkeypatched(sp.Pow, "_eval_derivative", _custom_pow_eval_derivative):
+ with _monkeypatched(
+ sp.Pow, "_eval_derivative", _custom_pow_eval_derivative
+ ):
self._prepare_model_folder()
self._generate_c_code()
self._generate_m_code()
@@ -2862,7 +2943,9 @@ def _generate_c_code(self) -> None:
self._write_swig_files()
self._write_module_setup()
- shutil.copy(CXX_MAIN_TEMPLATE_FILE, os.path.join(self.model_path, "main.cpp"))
+ shutil.copy(
+ CXX_MAIN_TEMPLATE_FILE, os.path.join(self.model_path, "main.cpp")
+ )
def _compile_c_code(
self,
@@ -2876,8 +2959,8 @@ def _compile_c_code(
Make model compilation verbose
:param compiler:
- distutils/setuptools compiler selection to build the python
- extension
+ Absolute path to the compiler executable to be used to build the Python
+ extension, e.g. ``/usr/bin/clang``.
"""
# setup.py assumes it is run from within the model directory
module_dir = self.model_path
@@ -2900,8 +2983,10 @@ def _compile_c_code(
]
)
+ env = os.environ.copy()
if compiler is not None:
- script_args.extend([f"--compiler={compiler}"])
+ # CMake will use the compiler specified in the CXX environment variable
+ env["CXX"] = compiler
# distutils.core.run_setup looks nicer, but does not let us check the
# result easily
@@ -2912,6 +2997,7 @@ def _compile_c_code(
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=True,
+ env=env,
)
except subprocess.CalledProcessError as e:
print(e.output.decode("utf-8"))
@@ -2940,8 +3026,10 @@ def _generate_m_code(self) -> None:
lines = [
"% This compile script was automatically created from"
" Python SBML import.",
- "% If mex compiler is set up within MATLAB, it can be run" " from MATLAB ",
- "% in order to compile a mex-file from the Python" " generated C++ files.",
+ "% If mex compiler is set up within MATLAB, it can be run"
+ " from MATLAB ",
+ "% in order to compile a mex-file from the Python"
+ " generated C++ files.",
"",
f"modelName = '{self.model_name}';",
"amimodel.compileAndLinkModel(modelName, '', [], [], [], []);",
@@ -2973,7 +3061,10 @@ def _get_index(self, name: str) -> Dict[sp.Symbol, int]:
else:
raise ValueError(f"Unknown symbolic array: {name}")
- return {strip_pysb(symbol).name: index for index, symbol in enumerate(symbols)}
+ return {
+ strip_pysb(symbol).name: index
+ for index, symbol in enumerate(symbols)
+ }
def _write_index_files(self, name: str) -> None:
"""
@@ -3028,7 +3119,8 @@ def _write_function_file(self, function: str) -> None:
if function in sparse_functions:
equations = self.model.sparseeq(function)
elif (
- not self.allow_reinit_fixpar_initcond and function == "sx0_fixedParameters"
+ not self.allow_reinit_fixpar_initcond
+ and function == "sx0_fixedParameters"
):
# Not required. Will create empty function body.
equations = sp.Matrix()
@@ -3055,17 +3147,22 @@ def _write_function_file(self, function: str) -> None:
lines = []
# function header
- lines.extend([
- '#include "amici/symbolic_functions.h"',
- '#include "amici/defines.h"',
- '#include "sundials/sundials_types.h"',
- "",
- "#include ",
- "#include ",
- "",
- ])
+ lines.extend(
+ [
+ '#include "amici/symbolic_functions.h"',
+ '#include "amici/defines.h"',
+ '#include "sundials/sundials_types.h"',
+ "",
+ "#include ",
+ "#include ",
+ "",
+ ]
+ )
if function == "create_splines":
- lines += ['#include "amici/splinefunctions.h"', "#include "]
+ lines += [
+ '#include "amici/splinefunctions.h"',
+ "#include ",
+ ]
func_info = self.functions[function]
@@ -3089,14 +3186,21 @@ def _write_function_file(self, function: str) -> None:
if iszero and not (
(sym == "y" and "Jy" in function)
- or (sym == "w" and "xdot" in function and len(self.model.sym(sym)))
+ or (
+ sym == "w"
+ and "xdot" in function
+ and len(self.model.sym(sym))
+ )
):
continue
lines.append(f'#include "{sym}.h"')
# include return symbols
- if function in self.model.sym_names() and function not in non_unique_id_symbols:
+ if (
+ function in self.model.sym_names()
+ and function not in non_unique_id_symbols
+ ):
lines.append(f'#include "{function}.h"')
lines.extend(
@@ -3115,7 +3219,10 @@ def _write_function_file(self, function: str) -> None:
body = [
# execute this twice to catch cases where the ending '(' would
# be the starting (^|\W) for the following match
- pow_rx.sub(r"\1amici::pos_pow(", pow_rx.sub(r"\1amici::pos_pow(", line))
+ pow_rx.sub(
+ r"\1amici::pos_pow(",
+ pow_rx.sub(r"\1amici::pos_pow(", line),
+ )
for line in body
]
@@ -3144,7 +3251,7 @@ def _write_function_file(self, function: str) -> None:
fileout.write("\n".join(lines))
def _generate_function_index(
- self, function: str, indextype: Literal["colptrs", "rowvals"]
+ self, function: str, indextype: Literal["colptrs", "rowvals"]
) -> List[str]:
"""
Generate equations and C++ code for the function ``function``.
@@ -3245,7 +3352,9 @@ def _generate_function_index(
return lines
- def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]:
+ def _get_function_body(
+ self, function: str, equations: sp.Matrix
+ ) -> List[str]:
"""
Generate C++ code for body of function ``function``.
@@ -3284,7 +3393,9 @@ def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]:
+ str(len(self.model._x0_fixedParameters_idx))
+ "> _x0_fixedParameters_idxs = {",
" "
- + ", ".join(str(x) for x in self.model._x0_fixedParameters_idx),
+ + ", ".join(
+ str(x) for x in self.model._x0_fixedParameters_idx
+ ),
" };",
"",
# Set all parameters that are to be reset to 0, so that the
@@ -3321,7 +3432,9 @@ def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]:
lines.extend(get_switch_statement("ip", cases, 1))
elif function == "x0_fixedParameters":
- for index, formula in zip(self.model._x0_fixedParameters_idx, equations):
+ for index, formula in zip(
+ self.model._x0_fixedParameters_idx, equations
+ ):
lines.append(
f" if(std::find(reinitialization_state_idxs.cbegin(), "
f"reinitialization_state_idxs.cend(), {index}) != "
@@ -3355,7 +3468,10 @@ def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]:
outer_cases[ie] = copy.copy(inner_lines)
lines.extend(get_switch_statement("ie", outer_cases, 1))
- elif function in sensi_functions and equations.shape[1] == self.model.num_par():
+ elif (
+ function in sensi_functions
+ and equations.shape[1] == self.model.num_par()
+ ):
cases = {
ipar: self.model._code_printer._get_sym_lines_array(
equations[:, ipar], function, 0
@@ -3388,7 +3504,8 @@ def _get_function_body(self, function: str, equations: sp.Matrix) -> List[str]:
lines.extend(get_switch_statement(iterator, cases, 1))
elif (
- function in self.model.sym_names() and function not in non_unique_id_symbols
+ function in self.model.sym_names()
+ and function not in non_unique_id_symbols
):
if function in sparse_functions:
symbols = list(map(sp.Symbol, self.model.sparsesym(function)))
@@ -3415,19 +3532,21 @@ def _get_create_splines_body(self):
body = ["return {"]
for ispl, spline in enumerate(self.model.splines):
if isinstance(spline.nodes, splines.UniformGrid):
- nodes = f"{ind8}{{{spline.nodes.start}, {spline.nodes.stop}}}, "
+ nodes = (
+ f"{ind8}{{{spline.nodes.start}, {spline.nodes.stop}}}, "
+ )
else:
nodes = f"{ind8}{{{', '.join(map(str, spline.nodes))}}}, "
# vector with the node values
- values = f"{ind8}{{{', '.join(map(str, spline.values_at_nodes))}}}, "
+ values = (
+ f"{ind8}{{{', '.join(map(str, spline.values_at_nodes))}}}, "
+ )
# vector with the slopes
if spline.derivatives_by_fd:
slopes = f"{ind8}{{}},"
else:
- slopes = (
- f"{ind8}{{{', '.join(map(str, spline.derivatives_at_nodes))}}},"
- )
+ slopes = f"{ind8}{{{', '.join(map(str, spline.derivatives_at_nodes))}}},"
body.extend(
[
@@ -3450,7 +3569,8 @@ def _get_create_splines_body(self):
body.append(ind8 + bc_to_cpp[bc])
except KeyError:
raise ValueError(
- f"Unknown boundary condition '{bc}' " "found in spline object"
+ f"Unknown boundary condition '{bc}' "
+ "found in spline object"
)
extrapolate_to_cpp = {
None: "SplineExtrapolation::noExtrapolation, ",
@@ -3464,12 +3584,15 @@ def _get_create_splines_body(self):
body.append(ind8 + extrapolate_to_cpp[extr])
except KeyError:
raise ValueError(
- f"Unknown extrapolation '{extr}' " "found in spline object"
+ f"Unknown extrapolation '{extr}' "
+ "found in spline object"
)
line = ind8
line += "true, " if spline.derivatives_by_fd else "false, "
line += (
- "true, " if isinstance(spline.nodes, splines.UniformGrid) else "false, "
+ "true, "
+ if isinstance(spline.nodes, splines.UniformGrid)
+ else "false, "
)
line += "true" if spline.logarithmic_parametrization else "false"
body.append(line)
@@ -3548,10 +3671,12 @@ def _write_model_header_cpp(self) -> None:
"NK": self.model.num_const(),
"O2MODE": "amici::SecondOrderMode::none",
# using code printer ensures proper handling of nan/inf
- "PARAMETERS": self.model._code_printer.doprint(self.model.val("p"))[1:-1],
- "FIXED_PARAMETERS": self.model._code_printer.doprint(self.model.val("k"))[
- 1:-1
- ],
+ "PARAMETERS": self.model._code_printer.doprint(
+ self.model.val("p")
+ )[1:-1],
+ "FIXED_PARAMETERS": self.model._code_printer.doprint(
+ self.model.val("k")
+ )[1:-1],
"PARAMETER_NAMES_INITIALIZER_LIST": self._get_symbol_name_initializer_list(
"p"
),
@@ -3566,12 +3691,16 @@ def _write_model_header_cpp(self) -> None:
),
"OBSERVABLE_TRAFO_INITIALIZER_LIST": "\n".join(
f"ObservableScaling::{trafo.value}, // y[{idx}]"
- for idx, trafo in enumerate(self.model.get_observable_transformations())
+ for idx, trafo in enumerate(
+ self.model.get_observable_transformations()
+ )
),
"EXPRESSION_NAMES_INITIALIZER_LIST": self._get_symbol_name_initializer_list(
"w"
),
- "PARAMETER_IDS_INITIALIZER_LIST": self._get_symbol_id_initializer_list("p"),
+ "PARAMETER_IDS_INITIALIZER_LIST": self._get_symbol_id_initializer_list(
+ "p"
+ ),
"STATE_IDS_INITIALIZER_LIST": self._get_symbol_id_initializer_list(
"x_rdata"
),
@@ -3652,16 +3781,22 @@ def _write_model_header_cpp(self) -> None:
indexfield,
nobody=True,
)
- tpl_data[f"{func_name.upper()}_{indexfield.upper()}_DEF"] = ""
+ tpl_data[
+ f"{func_name.upper()}_{indexfield.upper()}_DEF"
+ ] = ""
tpl_data[
f"{func_name.upper()}_{indexfield.upper()}_IMPL"
] = impl
continue
- tpl_data[f"{func_name.upper()}_DEF"] = get_function_extern_declaration(
+ tpl_data[
+ f"{func_name.upper()}_DEF"
+ ] = get_function_extern_declaration(
func_name, self.model_name, self.model.is_ode()
)
- tpl_data[f"{func_name.upper()}_IMPL"] = get_model_override_implementation(
+ tpl_data[
+ f"{func_name.upper()}_IMPL"
+ ] = get_model_override_implementation(
func_name, self.model_name, self.model.is_ode()
)
if func_name in sparse_functions:
@@ -3892,7 +4027,9 @@ def get_function_extern_declaration(fun: str, name: str, ode: bool) -> str:
return f"extern {f.return_type} {fun}_{name}({f.arguments(ode)});"
-def get_sunindex_extern_declaration(fun: str, name: str, indextype: str) -> str:
+def get_sunindex_extern_declaration(
+ fun: str, name: str, indextype: str
+) -> str:
"""
Constructs the function declaration for an index function of a given
function
@@ -4091,7 +4228,8 @@ def _custom_pow_eval_derivative(self, s):
return part1 + part2
return part1 + sp.Piecewise(
- (self.base, sp.And(sp.Eq(self.base, 0), sp.Eq(dbase, 0))), (part2, True)
+ (self.base, sp.And(sp.Eq(self.base, 0), sp.Eq(dbase, 0))),
+ (part2, True),
)
diff --git a/python/sdist/amici/de_model.py b/python/sdist/amici/de_model.py
index c5363511e7..77d9013ad2 100644
--- a/python/sdist/amici/de_model.py
+++ b/python/sdist/amici/de_model.py
@@ -67,7 +67,8 @@ def __init__(
hasattr(identifier, "name") and identifier.name in RESERVED_SYMBOLS
):
raise ValueError(
- f'Cannot add model quantity with name "{name}", ' f"please rename."
+ f'Cannot add model quantity with name "{name}", '
+ f"please rename."
)
self._identifier: sp.Symbol = identifier
@@ -301,7 +302,9 @@ class DifferentialState(State):
"""
- def __init__(self, identifier: sp.Symbol, name: str, init: sp.Expr, dt: sp.Expr):
+ def __init__(
+ self, identifier: sp.Symbol, name: str, init: sp.Expr, dt: sp.Expr
+ ):
"""
Create a new State instance. Extends :meth:`ModelQuantity.__init__`
by ``dt``
@@ -335,7 +338,8 @@ def set_conservation_law(self, law: ConservationLaw) -> None:
"""
if not isinstance(law, ConservationLaw):
raise TypeError(
- f"conservation law must have type ConservationLaw" f", was {type(law)}"
+ f"conservation law must have type ConservationLaw"
+ f", was {type(law)}"
)
self._conservation_law = law
@@ -425,13 +429,17 @@ def __init__(
def get_measurement_symbol(self) -> sp.Symbol:
if self._measurement_symbol is None:
- self._measurement_symbol = generate_measurement_symbol(self.get_id())
+ self._measurement_symbol = generate_measurement_symbol(
+ self.get_id()
+ )
return self._measurement_symbol
def get_regularization_symbol(self) -> sp.Symbol:
if self._regularization_symbol is None:
- self._regularization_symbol = generate_regularization_symbol(self.get_id())
+ self._regularization_symbol = generate_regularization_symbol(
+ self.get_id()
+ )
return self._regularization_symbol
@@ -556,7 +564,9 @@ class Parameter(ModelQuantity):
sensitivities may be computed, abbreviated by ``p``.
"""
- def __init__(self, identifier: sp.Symbol, name: str, value: numbers.Number):
+ def __init__(
+ self, identifier: sp.Symbol, name: str, value: numbers.Number
+ ):
"""
        Create a new Parameter instance.
@@ -579,7 +589,9 @@ class Constant(ModelQuantity):
sensitivities cannot be computed, abbreviated by ``k``.
"""
- def __init__(self, identifier: sp.Symbol, name: str, value: numbers.Number):
+ def __init__(
+ self, identifier: sp.Symbol, name: str, value: numbers.Number
+ ):
"""
        Create a new Constant instance.
diff --git a/python/sdist/amici/gradient_check.py b/python/sdist/amici/gradient_check.py
index ee900fe902..27e2d671d3 100644
--- a/python/sdist/amici/gradient_check.py
+++ b/python/sdist/amici/gradient_check.py
@@ -193,7 +193,8 @@ def check_derivatives(
fields.append("x")
leastsquares_applicable = (
- solver.getSensitivityMethod() == SensitivityMethod.forward and edata is not None
+ solver.getSensitivityMethod() == SensitivityMethod.forward
+ and edata is not None
)
if (
@@ -208,10 +209,18 @@ def check_derivatives(
fields += ["res", "y"]
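+        # for least-squares problems, the FIM equals the Gauss-Newton
+        # approximation sres^T * sres, and the log-likelihood gradient
+        # reduces to -res^T * sres; both identities are checked below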
_check_results(
- rdata, "FIM", np.dot(rdata["sres"].T, rdata["sres"]), atol=1e-8, rtol=1e-4
+ rdata,
+ "FIM",
+ np.dot(rdata["sres"].T, rdata["sres"]),
+ atol=1e-8,
+ rtol=1e-4,
)
_check_results(
- rdata, "sllh", -np.dot(rdata["res"].T, rdata["sres"]), atol=1e-8, rtol=1e-4
+ rdata,
+ "sllh",
+ -np.dot(rdata["res"].T, rdata["sres"]),
+ atol=1e-8,
+ rtol=1e-4,
)
if edata is not None:
@@ -221,7 +230,15 @@ def check_derivatives(
if pval == 0.0 and skip_zero_pars:
continue
check_finite_difference(
- p, model, solver, edata, ip, fields, atol=atol, rtol=rtol, epsilon=epsilon
+ p,
+ model,
+ solver,
+ edata,
+ ip,
+ fields,
+ atol=atol,
+ rtol=rtol,
+ epsilon=epsilon,
)
@@ -317,4 +334,6 @@ def _check_results(
if type(result) is float:
result = np.array(result)
- _check_close(result=result, expected=expected, atol=atol, rtol=rtol, field=field)
+ _check_close(
+ result=result, expected=expected, atol=atol, rtol=rtol, field=field
+ )
diff --git a/python/sdist/amici/import_utils.py b/python/sdist/amici/import_utils.py
index 953af3dd85..77a2add60b 100644
--- a/python/sdist/amici/import_utils.py
+++ b/python/sdist/amici/import_utils.py
@@ -45,14 +45,18 @@ def __init__(self, data):
s = "Circular dependencies exist among these items: {{{}}}".format(
", ".join(
"{!r}:{!r}".format(key, value)
- for key, value in sorted({str(k): v for k, v in data.items()}.items())
+ for key, value in sorted(
+ {str(k): v for k, v in data.items()}.items()
+ )
)
)
super(CircularDependencyError, self).__init__(s)
self.data = data
-setattr(sys.modules["toposort"], "CircularDependencyError", CircularDependencyError)
+setattr(
+ sys.modules["toposort"], "CircularDependencyError", CircularDependencyError
+)
annotation_namespace = "https://github.com/AMICI-dev/AMICI"
@@ -215,7 +219,8 @@ def noise_distribution_to_cost_function(
y_string = "log(2*{sigma}*{m}) + Abs(log({y}) - log({m})) / {sigma}"
elif noise_distribution == "log10-laplace":
y_string = (
- "log(2*{sigma}*{m}*log(10)) " "+ Abs(log({y}, 10) - log({m}, 10)) / {sigma}"
+ "log(2*{sigma}*{m}*log(10)) "
+ "+ Abs(log({y}, 10) - log({m}, 10)) / {sigma}"
)
elif noise_distribution in ["binomial", "lin-binomial"]:
# Binomial noise model parameterized via success probability p
@@ -236,7 +241,9 @@ def noise_distribution_to_cost_function(
f"- {{m}} * log({{sigma}})"
)
else:
- raise ValueError(f"Cost identifier {noise_distribution} not recognized.")
+ raise ValueError(
+ f"Cost identifier {noise_distribution} not recognized."
+ )
def nllh_y_string(str_symbol):
y, m, sigma = _get_str_symbol_identifiers(str_symbol)
@@ -252,7 +259,10 @@ def _get_str_symbol_identifiers(str_symbol: str) -> tuple:
def smart_subs_dict(
- sym: sp.Expr, subs: SymbolDef, field: Optional[str] = None, reverse: bool = True
+ sym: sp.Expr,
+ subs: SymbolDef,
+ field: Optional[str] = None,
+ reverse: bool = True,
) -> sp.Expr:
"""
    Substitutes expressions, completely flattening them out. Requires
@@ -275,7 +285,8 @@ def smart_subs_dict(
Substituted symbolic expression
"""
s = [
- (eid, expr[field] if field is not None else expr) for eid, expr in subs.items()
+ (eid, expr[field] if field is not None else expr)
+ for eid, expr in subs.items()
]
if reverse:
s.reverse()
@@ -306,7 +317,9 @@ def smart_subs(element: sp.Expr, old: sp.Symbol, new: sp.Expr) -> sp.Expr:
return element.subs(old, new) if element.has(old) else element
-def toposort_symbols(symbols: SymbolDef, field: Optional[str] = None) -> SymbolDef:
+def toposort_symbols(
+ symbols: SymbolDef, field: Optional[str] = None
+) -> SymbolDef:
"""
Topologically sort symbol definitions according to their interdependency
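+
+    e.g., a symbol ``w2`` whose definition references ``w1`` is returned
+    after ``w1``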
@@ -383,7 +396,9 @@ def _parse_special_functions(sym: sp.Expr, toplevel: bool = True) -> sp.Expr:
if sym.__class__.__name__ in fun_mappings:
return fun_mappings[sym.__class__.__name__](*args)
- elif sym.__class__.__name__ == "piecewise" or isinstance(sym, sp.Piecewise):
+ elif sym.__class__.__name__ == "piecewise" or isinstance(
+ sym, sp.Piecewise
+ ):
if isinstance(sym, sp.Piecewise):
# this is sympy piecewise, can't be nested
denested_args = args
@@ -435,7 +450,9 @@ def _denest_piecewise(
# piece was picked
previous_was_picked = sp.false
# recursively denest those first
- for sub_coeff, sub_cond in grouper(_denest_piecewise(cond.args), 2, True):
+ for sub_coeff, sub_cond in grouper(
+ _denest_piecewise(cond.args), 2, True
+ ):
# flatten the individual pieces
pick_this = sp.And(sp.Not(previous_was_picked), sub_cond)
if sub_coeff == sp.true:
@@ -516,7 +533,9 @@ def _parse_heaviside_trigger(trigger: sp.Expr) -> sp.Expr:
    # or(x, y) = not(and(not(x), not(y)))
if isinstance(trigger, sp.Or):
- return 1 - sp.Mul(*[1 - _parse_heaviside_trigger(arg) for arg in trigger.args])
+ return 1 - sp.Mul(
+ *[1 - _parse_heaviside_trigger(arg) for arg in trigger.args]
+ )
if isinstance(trigger, sp.And):
return sp.Mul(*[_parse_heaviside_trigger(arg) for arg in trigger.args])
@@ -527,7 +546,9 @@ def _parse_heaviside_trigger(trigger: sp.Expr) -> sp.Expr:
)
-def grouper(iterable: Iterable, n: int, fillvalue: Any = None) -> Iterable[Tuple[Any]]:
+def grouper(
+ iterable: Iterable, n: int, fillvalue: Any = None
+) -> Iterable[Tuple[Any]]:
"""
Collect data into fixed-length chunks or blocks
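+
+    e.g., ``grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx`` (cf. the
+    itertools recipes)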
@@ -659,7 +680,9 @@ def generate_regularization_symbol(observable_id: Union[str, sp.Symbol]):
return symbol_with_assumptions(f"r{observable_id}")
-def generate_flux_symbol(reaction_index: int, name: Optional[str] = None) -> sp.Symbol:
+def generate_flux_symbol(
+ reaction_index: int, name: Optional[str] = None
+) -> sp.Symbol:
"""
Generate identifier symbol for a reaction flux.
This function will always return the same unique python object for a
diff --git a/python/sdist/amici/logging.py b/python/sdist/amici/logging.py
index 5f548de7a1..2648fc5b28 100644
--- a/python/sdist/amici/logging.py
+++ b/python/sdist/amici/logging.py
@@ -166,7 +166,8 @@ def get_logger(
_setup_logger(**kwargs)
elif kwargs:
warnings.warn(
- "AMICI logger already exists, ignoring keyword " "arguments to setup_logger"
+ "AMICI logger already exists, ignoring keyword "
+ "arguments to setup_logger"
)
logger = logging.getLogger(logger_name)
@@ -193,7 +194,8 @@ def decorator_timer(func):
def wrapper_timer(*args, **kwargs):
# append pluses to indicate recursion level
recursion_level = sum(
- frame.function == "wrapper_timer" and frame.filename == __file__
+ frame.function == "wrapper_timer"
+ and frame.filename == __file__
for frame in getouterframes(currentframe(), context=0)
)
diff --git a/python/sdist/amici/numpy.py b/python/sdist/amici/numpy.py
index 91ca6449f6..b84e52cc2b 100644
--- a/python/sdist/amici/numpy.py
+++ b/python/sdist/amici/numpy.py
@@ -10,9 +10,12 @@
import amici
import numpy as np
+import sympy as sp
from . import ExpData, ExpDataPtr, Model, ReturnData, ReturnDataPtr
+StrOrExpr = Union[str, sp.Expr]
+
class SwigPtrView(collections.abc.Mapping):
"""
@@ -134,7 +137,8 @@ def __deepcopy__(self, memo):
:returns: SwigPtrView deep copy
"""
- other = SwigPtrView(self._swigptr)
+ # We assume we have a copy-ctor for the swigptr object
+ other = self.__class__(copy.deepcopy(self._swigptr))
other._field_names = copy.deepcopy(self._field_names)
other._field_dimensions = copy.deepcopy(self._field_dimensions)
other._cache = copy.deepcopy(self._cache)
@@ -148,6 +152,18 @@ def __repr__(self):
"""
return f"<{self.__class__.__name__}({self._swigptr})>"
+ def __eq__(self, other):
+ """
+ Equality check
+
+ :param other: other object
+
+ :returns: whether other object is equal to this object
+ """
+ if not isinstance(other, self.__class__):
+ return False
+ return self._swigptr == other._swigptr
+
class ReturnDataView(SwigPtrView):
"""
@@ -220,7 +236,8 @@ def __init__(self, rdata: Union[ReturnDataPtr, ReturnData]):
"""
if not isinstance(rdata, (ReturnDataPtr, ReturnData)):
raise TypeError(
- f"Unsupported pointer {type(rdata)}, must be" f"amici.ExpDataPtr!"
+ f"Unsupported pointer {type(rdata)}, must be"
+ f"amici.ExpDataPtr!"
)
self._field_dimensions = {
"ts": [rdata.nt],
@@ -293,7 +310,13 @@ def __getitem__(
return super().__getitem__(item)
- def by_id(self, entity_id: str, field: str = None, model: Model = None) -> np.array:
+ def __repr__(self):
+ status = amici.simulation_status_to_str(self._swigptr.status)
+ return f"<{self.__class__.__name__}(id={self._swigptr.id!r}, status={status})>"
+
+ def by_id(
+ self, entity_id: str, field: str = None, model: Model = None
+ ) -> np.array:
"""
Get the value of a given field for a named entity.
@@ -311,11 +334,17 @@ def by_id(self, entity_id: str, field: str = None, model: Model = None) -> np.ar
if field in {"x", "x0", "x_ss", "sx", "sx0", "sx_ss"}:
ids = (model and model.getStateIds()) or self._swigptr.state_ids
elif field in {"w"}:
- ids = (model and model.getExpressionIds()) or self._swigptr.expression_ids
+ ids = (
+ model and model.getExpressionIds()
+ ) or self._swigptr.expression_ids
elif field in {"y", "sy", "sigmay"}:
- ids = (model and model.getObservableIds()) or self._swigptr.observable_ids
+ ids = (
+ model and model.getObservableIds()
+ ) or self._swigptr.observable_ids
elif field in {"sllh"}:
- ids = (model and model.getParameterIds()) or self._swigptr.parameter_ids
+ ids = (
+ model and model.getParameterIds()
+ ) or self._swigptr.parameter_ids
else:
raise NotImplementedError(
f"Subsetting {field} by ID is not implemented or not possible."
@@ -328,9 +357,13 @@ class ExpDataView(SwigPtrView):
"""
    Interface class for C++ ExpData objects that avoids possibly costly
copies of member data.
+
+ NOTE: This currently assumes that the underlying :class:`ExpData`
+ does not change after instantiating an :class:`ExpDataView`.
"""
_field_names = [
+ "ts",
"observedData",
"observedDataStdDev",
"observedEvents",
@@ -348,9 +381,12 @@ def __init__(self, edata: Union[ExpDataPtr, ExpData]):
"""
if not isinstance(edata, (ExpDataPtr, ExpData)):
raise TypeError(
- f"Unsupported pointer {type(edata)}, must be" f"amici.ExpDataPtr!"
+ f"Unsupported pointer {type(edata)}, must be"
+ f"amici.ExpDataPtr!"
)
- self._field_dimensions = { # observables
+ self._field_dimensions = {
+ "ts": [edata.nt()],
+ # observables
"observedData": [edata.nt(), edata.nytrue()],
"observedDataStdDev": [edata.nt(), edata.nytrue()],
# event observables
@@ -365,6 +401,7 @@ def __init__(self, edata: Union[ExpDataPtr, ExpData]):
len(edata.fixedParametersPreequilibration)
],
}
+ edata.ts = edata.ts_
edata.observedData = edata.getObservedData()
edata.observedDataStdDev = edata.getObservedDataStdDev()
edata.observedEvents = edata.getObservedEvents()
@@ -411,9 +448,36 @@ def _entity_type_from_id(
return symbol
else:
if entity_id in getattr(
- rdata if isinstance(rdata, amici.ReturnData) else rdata._swigptr,
+ rdata
+ if isinstance(rdata, amici.ReturnData)
+ else rdata._swigptr,
f"{entity_type.lower()}_ids",
):
return symbol
raise KeyError(f"Unknown symbol {entity_id}.")
+
+
+def evaluate(expr: StrOrExpr, rdata: ReturnDataView) -> np.array:
+ """Evaluate a symbolic expression based on the given simulation outputs.
+
+ :param expr:
+        A symbolic expression, e.g. a sympy expression or a string that
+        can be sympified. It can include state variable, expression, and
+        observable IDs, depending on whether the respective data is
+        available in the simulation results.
+ Parameters are not yet supported.
+ :param rdata:
+ The simulation results.
+
+ :return:
+ The evaluated expression for the simulation output timepoints.
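+
+    Example (``obs_a`` and ``obs_b`` are hypothetical observable IDs;
+    ``rdata`` is an existing :class:`ReturnDataView`):
+
+    >>> evaluate("obs_a / (obs_a + obs_b)", rdata)  # doctest: +SKIP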
+ """
+ from sympy.utilities.lambdify import lambdify
+
+ if isinstance(expr, str):
+ expr = sp.sympify(expr)
+
+ arg_names = list(sorted(expr.free_symbols, key=lambda x: x.name))
+ func = lambdify(arg_names, expr, "numpy")
+ args = [rdata.by_id(arg.name) for arg in arg_names]
+ return func(*args)
diff --git a/python/sdist/amici/pandas.py b/python/sdist/amici/pandas.py
index dd240242af..8a2eb5049d 100644
--- a/python/sdist/amici/pandas.py
+++ b/python/sdist/amici/pandas.py
@@ -107,7 +107,9 @@ def getDataObservablesAsDataFrame(
_get_names_or_ids(model, "Observable", by_id=by_id)
):
datadict[obs] = npdata["observedData"][i_time, i_obs]
- datadict[obs + "_std"] = npdata["observedDataStdDev"][i_time, i_obs]
+ datadict[obs + "_std"] = npdata["observedDataStdDev"][
+ i_time, i_obs
+ ]
# add conditions
_fill_conditions_dict(datadict, model, edata, by_id=by_id)
@@ -396,12 +398,16 @@ def _fill_conditions_dict(
datadict[par] = model.getFixedParameters()[i_par]
if len(edata.fixedParametersPreequilibration):
- datadict[par + "_preeq"] = edata.fixedParametersPreequilibration[i_par]
+ datadict[par + "_preeq"] = edata.fixedParametersPreequilibration[
+ i_par
+ ]
else:
datadict[par + "_preeq"] = np.nan
if len(edata.fixedParametersPresimulation):
- datadict[par + "_presim"] = edata.fixedParametersPresimulation[i_par]
+ datadict[par + "_presim"] = edata.fixedParametersPresimulation[
+ i_par
+ ]
else:
datadict[par + "_presim"] = np.nan
return datadict
@@ -526,7 +532,9 @@ def _get_expression_cols(model: AmiciModel, by_id: bool) -> List[str]:
)
-def _get_names_or_ids(model: AmiciModel, variable: str, by_id: bool) -> List[str]:
+def _get_names_or_ids(
+ model: AmiciModel, variable: str, by_id: bool
+) -> List[str]:
"""
Obtains a unique list of identifiers for the specified variable.
First tries model.getVariableNames and then uses model.getVariableIds.
@@ -674,22 +682,33 @@ def constructEdataFromDataFrame(
)
# fill in preequilibration parameters
- if any([overwrite_preeq[key] != condition[key] for key in overwrite_preeq]):
- edata.fixedParametersPreequilibration = _get_specialized_fixed_parameters(
- model, condition, overwrite_preeq, by_id=by_id
+ if any(
+ [overwrite_preeq[key] != condition[key] for key in overwrite_preeq]
+ ):
+ edata.fixedParametersPreequilibration = (
+ _get_specialized_fixed_parameters(
+ model, condition, overwrite_preeq, by_id=by_id
+ )
)
elif len(overwrite_preeq):
- edata.fixedParametersPreequilibration = copy.deepcopy(edata.fixedParameters)
+ edata.fixedParametersPreequilibration = copy.deepcopy(
+ edata.fixedParameters
+ )
# fill in presimulation parameters
if any(
- [overwrite_presim[key] != condition[key] for key in overwrite_presim.keys()]
+ [
+ overwrite_presim[key] != condition[key]
+ for key in overwrite_presim.keys()
+ ]
):
edata.fixedParametersPresimulation = _get_specialized_fixed_parameters(
model, condition, overwrite_presim, by_id=by_id
)
elif len(overwrite_presim.keys()):
- edata.fixedParametersPresimulation = copy.deepcopy(edata.fixedParameters)
+ edata.fixedParametersPresimulation = copy.deepcopy(
+ edata.fixedParameters
+ )
# fill in presimulation time
if "t_presim" in condition.keys():
@@ -739,7 +758,9 @@ def getEdataFromDataFrame(
# aggregate features that define a condition
# fixed parameters
- condition_parameters = _get_names_or_ids(model, "FixedParameter", by_id=by_id)
+ condition_parameters = _get_names_or_ids(
+ model, "FixedParameter", by_id=by_id
+ )
# preeq and presim parameters
for par in _get_names_or_ids(model, "FixedParameter", by_id=by_id):
if par + "_preeq" in df.columns:
@@ -758,7 +779,9 @@ def getEdataFromDataFrame(
selected = np.ones((len(df),), dtype=bool)
for par_label, par in row.items():
if math.isnan(par):
- selected = selected & np.isnan(df[par_label].astype(float).values)
+ selected = selected & np.isnan(
+ df[par_label].astype(float).values
+ )
else:
selected = selected & (df[par_label] == par)
edata_df = df[selected]
diff --git a/python/sdist/amici/parameter_mapping.py b/python/sdist/amici/parameter_mapping.py
index 9f4d3b24dd..f1cf75a150 100644
--- a/python/sdist/amici/parameter_mapping.py
+++ b/python/sdist/amici/parameter_mapping.py
@@ -126,7 +126,9 @@ class ParameterMapping(Sequence):
List of parameter mappings for specific conditions.
"""
- def __init__(self, parameter_mappings: List[ParameterMappingForCondition] = None):
+ def __init__(
+ self, parameter_mappings: List[ParameterMappingForCondition] = None
+ ):
super().__init__()
if parameter_mappings is None:
parameter_mappings = []
@@ -146,7 +148,9 @@ def __getitem__(
def __len__(self):
return len(self.parameter_mappings)
- def append(self, parameter_mapping_for_condition: ParameterMappingForCondition):
+ def append(
+ self, parameter_mapping_for_condition: ParameterMappingForCondition
+ ):
"""Append a condition specific parameter mapping."""
self.parameter_mappings.append(parameter_mapping_for_condition)
@@ -188,7 +192,8 @@ def fill_in_parameters(
set(problem_parameters.keys()) - parameter_mapping.free_symbols
):
warnings.warn(
- "The following problem parameters were not used: " + str(unused_parameters),
+ "The following problem parameters were not used: "
+ + str(unused_parameters),
RuntimeWarning,
)
@@ -262,10 +267,12 @@ def _get_par(model_par, value, mapping):
return value
map_preeq_fix = {
- key: _get_par(key, val, map_preeq_fix) for key, val in map_preeq_fix.items()
+ key: _get_par(key, val, map_preeq_fix)
+ for key, val in map_preeq_fix.items()
}
map_sim_fix = {
- key: _get_par(key, val, map_sim_fix) for key, val in map_sim_fix.items()
+ key: _get_par(key, val, map_sim_fix)
+ for key, val in map_sim_fix.items()
}
map_sim_var = {
key: _get_par(key, val, dict(map_sim_fix, **map_sim_var))
@@ -289,7 +296,9 @@ def _get_par(model_par, value, mapping):
# variable parameters and parameter scale
# parameter list from mapping dict
- parameters = [map_sim_var[par_id] for par_id in amici_model.getParameterIds()]
+ parameters = [
+ map_sim_var[par_id] for par_id in amici_model.getParameterIds()
+ ]
# scales list from mapping dict
scales = [
@@ -317,7 +326,8 @@ def _get_par(model_par, value, mapping):
# fixed parameters preequilibration
if map_preeq_fix:
fixed_pars_preeq = [
- map_preeq_fix[par_id] for par_id in amici_model.getFixedParameterIds()
+ map_preeq_fix[par_id]
+ for par_id in amici_model.getFixedParameterIds()
]
edata.fixedParametersPreequilibration = fixed_pars_preeq
@@ -325,7 +335,8 @@ def _get_par(model_par, value, mapping):
# fixed parameters simulation
if map_sim_fix:
fixed_pars_sim = [
- map_sim_fix[par_id] for par_id in amici_model.getFixedParameterIds()
+ map_sim_fix[par_id]
+ for par_id in amici_model.getFixedParameterIds()
]
edata.fixedParameters = fixed_pars_sim
@@ -370,11 +381,14 @@ def scale_parameter(value: numbers.Number, petab_scale: str) -> numbers.Number:
if petab_scale == LOG:
return np.log(value)
raise ValueError(
- f"Unknown parameter scale {petab_scale}. " f"Must be from {(LIN, LOG, LOG10)}"
+ f"Unknown parameter scale {petab_scale}. "
+ f"Must be from {(LIN, LOG, LOG10)}"
)
-def unscale_parameter(value: numbers.Number, petab_scale: str) -> numbers.Number:
+def unscale_parameter(
+ value: numbers.Number, petab_scale: str
+) -> numbers.Number:
"""Bring parameter from scale to linear scale.
:param value:
@@ -392,7 +406,8 @@ def unscale_parameter(value: numbers.Number, petab_scale: str) -> numbers.Number
if petab_scale == LOG:
return np.exp(value)
raise ValueError(
- f"Unknown parameter scale {petab_scale}. " f"Must be from {(LIN, LOG, LOG10)}"
+ f"Unknown parameter scale {petab_scale}. "
+ f"Must be from {(LIN, LOG, LOG10)}"
)
diff --git a/python/sdist/amici/petab_import.py b/python/sdist/amici/petab_import.py
index 909bf250ae..23fe4394f0 100644
--- a/python/sdist/amici/petab_import.py
+++ b/python/sdist/amici/petab_import.py
@@ -306,7 +306,9 @@ def import_petab_problem(
if petab_problem.mapping_df is not None:
# It's partially supported. Remove at your own risk...
- raise NotImplementedError("PEtab v2.0.0 mapping tables are not yet supported.")
+ raise NotImplementedError(
+ "PEtab v2.0.0 mapping tables are not yet supported."
+ )
model_name = model_name or petab_problem.model.model_id
@@ -366,7 +368,9 @@ def import_petab_problem(
model = model_module.getModel()
check_model(amici_model=model, petab_problem=petab_problem)
- logger.info(f"Successfully loaded model {model_name} " f"from {model_output_dir}.")
+ logger.info(
+ f"Successfully loaded model {model_name} " f"from {model_output_dir}."
+ )
return model
@@ -383,7 +387,9 @@ def check_model(
amici_ids = amici_ids_free | set(amici_model.getFixedParameterIds())
petab_ids_free = set(
- petab_problem.parameter_df.loc[petab_problem.parameter_df[ESTIMATE] == 1].index
+ petab_problem.parameter_df.loc[
+ petab_problem.parameter_df[ESTIMATE] == 1
+ ].index
)
amici_ids_free_required = petab_ids_free.intersection(amici_ids)
@@ -429,7 +435,9 @@ def _create_model_name(folder: Union[str, Path]) -> str:
return os.path.split(os.path.normpath(folder))[-1]
-def _can_import_model(model_name: str, model_output_dir: Union[str, Path]) -> bool:
+def _can_import_model(
+ model_name: str, model_output_dir: Union[str, Path]
+) -> bool:
"""
Check whether a module of that name can already be imported.
"""
@@ -555,7 +563,8 @@ def import_model_sbml(
if petab_problem.observable_df is None:
raise NotImplementedError(
- "PEtab import without observables table " "is currently not supported."
+ "PEtab import without observables table "
+ "is currently not supported."
)
assert isinstance(petab_problem.model, SbmlModel)
@@ -596,8 +605,10 @@ def import_model_sbml(
)
sbml_model = sbml_importer.sbml
- allow_n_noise_pars = not petab.lint.observable_table_has_nontrivial_noise_formula(
- petab_problem.observable_df
+ allow_n_noise_pars = (
+ not petab.lint.observable_table_has_nontrivial_noise_formula(
+ petab_problem.observable_df
+ )
)
if (
petab_problem.measurement_df is not None
@@ -632,7 +643,9 @@ def import_model_sbml(
# so we add any output parameters to the SBML model.
# this should be changed to something more elegant
#
- formulas = chain((val["formula"] for val in observables.values()), sigmas.values())
+ formulas = chain(
+ (val["formula"] for val in observables.values()), sigmas.values()
+ )
output_parameters = OrderedDict()
for formula in formulas:
# we want reproducible parameter ordering upon repeated import
@@ -649,10 +662,13 @@ def import_model_sbml(
):
output_parameters[sym] = None
logger.debug(
- "Adding output parameters to model: " f"{list(output_parameters.keys())}"
+ "Adding output parameters to model: "
+ f"{list(output_parameters.keys())}"
)
output_parameter_defaults = output_parameter_defaults or {}
- if extra_pars := (set(output_parameter_defaults) - set(output_parameters.keys())):
+ if extra_pars := (
+ set(output_parameter_defaults) - set(output_parameters.keys())
+ ):
raise ValueError(
f"Default output parameter values were given for {extra_pars}, "
"but they those are not output parameters."
@@ -691,7 +707,8 @@ def import_model_sbml(
# Can only reset parameters after preequilibration if they are fixed.
fixed_parameters.append(PREEQ_INDICATOR_ID)
logger.debug(
- "Adding preequilibration indicator " f"constant {PREEQ_INDICATOR_ID}"
+ "Adding preequilibration indicator "
+ f"constant {PREEQ_INDICATOR_ID}"
)
logger.debug(f"Adding initial assignments for {initial_states.keys()}")
for assignee_id in initial_states:
@@ -756,7 +773,8 @@ def import_model_sbml(
)
if kwargs.get(
- "compile", amici._get_default_argument(sbml_importer.sbml2amici, "compile")
+ "compile",
+ amici._get_default_argument(sbml_importer.sbml2amici, "compile"),
):
# check that the model extension was compiled successfully
model_module = amici.import_model_module(model_name, model_output_dir)
@@ -772,7 +790,9 @@ def import_model_sbml(
def get_observation_model(
observable_df: pd.DataFrame,
-) -> Tuple[Dict[str, Dict[str, str]], Dict[str, str], Dict[str, Union[str, float]]]:
+) -> Tuple[
+ Dict[str, Dict[str, str]], Dict[str, str], Dict[str, Union[str, float]]
+]:
"""
Get observables, sigmas, and noise distributions from PEtab observation
table in a format suitable for
@@ -804,7 +824,9 @@ def get_observation_model(
# cannot handle states in sigma expressions. Therefore, where possible,
# replace species occurring in error model definition by observableIds.
replacements = {
- sp.sympify(observable["formula"], locals=_clash): sp.Symbol(observable_id)
+ sp.sympify(observable["formula"], locals=_clash): sp.Symbol(
+ observable_id
+ )
for observable_id, observable in observables.items()
}
for observable_id, formula in sigmas.items():
@@ -816,7 +838,9 @@ def get_observation_model(
return observables, noise_distrs, sigmas
-def petab_noise_distributions_to_amici(observable_df: pd.DataFrame) -> Dict[str, str]:
+def petab_noise_distributions_to_amici(
+ observable_df: pd.DataFrame,
+) -> Dict[str, str]:
"""
Map from the petab to the amici format of noise distribution
identifiers.
@@ -868,7 +892,9 @@ def show_model_info(sbml_model: "libsbml.Model"):
"""Log some model quantities"""
logger.info(f"Species: {len(sbml_model.getListOfSpecies())}")
- logger.info("Global parameters: " + str(len(sbml_model.getListOfParameters())))
+ logger.info(
+ "Global parameters: " + str(len(sbml_model.getListOfParameters()))
+ )
logger.info(f"Reactions: {len(sbml_model.getListOfReactions())}")
@@ -930,20 +956,35 @@ def _parse_cli_args():
"-s", "--sbml", dest="sbml_file_name", help="SBML model filename"
)
parser.add_argument(
- "-m", "--measurements", dest="measurement_file_name", help="Measurement table"
+ "-m",
+ "--measurements",
+ dest="measurement_file_name",
+ help="Measurement table",
)
parser.add_argument(
- "-c", "--conditions", dest="condition_file_name", help="Conditions table"
+ "-c",
+ "--conditions",
+ dest="condition_file_name",
+ help="Conditions table",
)
parser.add_argument(
- "-p", "--parameters", dest="parameter_file_name", help="Parameter table"
+ "-p",
+ "--parameters",
+ dest="parameter_file_name",
+ help="Parameter table",
)
parser.add_argument(
- "-b", "--observables", dest="observable_file_name", help="Observable table"
+ "-b",
+ "--observables",
+ dest="observable_file_name",
+ help="Observable table",
)
parser.add_argument(
- "-y", "--yaml", dest="yaml_file_name", help="PEtab YAML problem filename"
+ "-y",
+ "--yaml",
+ dest="yaml_file_name",
+ help="PEtab YAML problem filename",
)
parser.add_argument(
@@ -956,7 +997,11 @@ def _parse_cli_args():
args = parser.parse_args()
if not args.yaml_file_name and not all(
- (args.sbml_file_name, args.condition_file_name, args.observable_file_name)
+ (
+ args.sbml_file_name,
+ args.condition_file_name,
+ args.observable_file_name,
+ )
):
parser.error(
"When not specifying a model name or YAML file, then "
diff --git a/python/sdist/amici/petab_import_pysb.py b/python/sdist/amici/petab_import_pysb.py
index 63c1dd9681..8036d1358d 100644
--- a/python/sdist/amici/petab_import_pysb.py
+++ b/python/sdist/amici/petab_import_pysb.py
@@ -23,7 +23,9 @@
logger = get_logger(__name__, logging.WARNING)
-def _add_observation_model(pysb_model: pysb.Model, petab_problem: petab.Problem):
+def _add_observation_model(
+ pysb_model: pysb.Model, petab_problem: petab.Problem
+):
"""Extend PySB model by observation model as defined in the PEtab
observables table"""
@@ -65,7 +67,9 @@ def _add_observation_model(pysb_model: pysb.Model, petab_problem: petab.Problem)
local_syms[sigma_id] = sigma_expr
-def _add_initialization_variables(pysb_model: pysb.Model, petab_problem: petab.Problem):
+def _add_initialization_variables(
+ pysb_model: pysb.Model, petab_problem: petab.Problem
+):
"""Add initialization variables to the PySB model to support initial
conditions specified in the PEtab condition table.
@@ -92,7 +96,8 @@ def _add_initialization_variables(pysb_model: pysb.Model, petab_problem: petab.P
# Can only reset parameters after preequilibration if they are fixed.
fixed_parameters.append(PREEQ_INDICATOR_ID)
logger.debug(
- "Adding preequilibration indicator constant " f"{PREEQ_INDICATOR_ID}"
+ "Adding preequilibration indicator constant "
+ f"{PREEQ_INDICATOR_ID}"
)
logger.debug(f"Adding initial assignments for {initial_states.keys()}")
@@ -131,7 +136,9 @@ def _add_initialization_variables(pysb_model: pysb.Model, petab_problem: petab.P
pysb_model.add_component(formula)
for initial in pysb_model.initials:
- if match_complex_pattern(initial.pattern, species_pattern, exact=True):
+ if match_complex_pattern(
+ initial.pattern, species_pattern, exact=True
+ ):
logger.debug(
"The PySB model has an initial defined for species "
f"{assignee_id}, but this species also has an initial "
@@ -226,9 +233,14 @@ def import_model_pysb(
f"column: {x}"
)
- from .petab_import import get_fixed_parameters, petab_noise_distributions_to_amici
+ from .petab_import import (
+ get_fixed_parameters,
+ petab_noise_distributions_to_amici,
+ )
- constant_parameters = get_fixed_parameters(petab_problem) + fixed_parameters
+ constant_parameters = (
+ get_fixed_parameters(petab_problem) + fixed_parameters
+ )
if petab_problem.observable_df is None:
observables = None
@@ -243,7 +255,9 @@ def import_model_pysb(
sigmas = {obs_id: f"{obs_id}_sigma" for obs_id in observables}
- noise_distrs = petab_noise_distributions_to_amici(petab_problem.observable_df)
+ noise_distrs = petab_noise_distributions_to_amici(
+ petab_problem.observable_df
+ )
from amici.pysb_import import pysb2amici
diff --git a/python/sdist/amici/petab_objective.py b/python/sdist/amici/petab_objective.py
index f518724c82..e3111d3b68 100644
--- a/python/sdist/amici/petab_objective.py
+++ b/python/sdist/amici/petab_objective.py
@@ -144,7 +144,11 @@ def simulate_petab(
# number of amici simulations will be number of unique
# (preequilibrationConditionId, simulationConditionId) pairs.
# Can be optimized by checking for identical condition vectors.
- if simulation_conditions is None and parameter_mapping is None and edatas is None:
+ if (
+ simulation_conditions is None
+ and parameter_mapping is None
+ and edatas is None
+ ):
simulation_conditions = (
petab_problem.get_simulation_conditions_from_measurement_df()
)
@@ -262,7 +266,8 @@ def aggregate_sllh(
if petab_scale and petab_problem is None:
raise ValueError(
- "Please provide the PEtab problem, when using " "`petab_scale=True`."
+ "Please provide the PEtab problem, when using "
+ "`petab_scale=True`."
)
# Check for issues in all condition simulation results.
@@ -280,7 +285,9 @@ def aggregate_sllh(
for condition_parameter_mapping, edata, rdata in zip(
parameter_mapping, edatas, rdatas
):
- for sllh_parameter_index, condition_parameter_sllh in enumerate(rdata.sllh):
+ for sllh_parameter_index, condition_parameter_sllh in enumerate(
+ rdata.sllh
+ ):
# Get PEtab parameter ID
# Use ExpData if it provides a parameter list, else default to
# Model.
@@ -301,9 +308,11 @@ def aggregate_sllh(
if petab_scale:
# `ParameterMappingForCondition` objects provide the scale in
# terms of `petab.C` constants already, not AMICI equivalents.
- model_parameter_scale = condition_parameter_mapping.scale_map_sim_var[
- model_parameter_id
- ]
+ model_parameter_scale = (
+ condition_parameter_mapping.scale_map_sim_var[
+ model_parameter_id
+ ]
+ )
petab_parameter_scale = petab_problem.parameter_df.loc[
petab_parameter_id, PARAMETER_SCALE
]
@@ -362,7 +371,9 @@ def rescale_sensitivity(
scale[(LOG10, LOG)] = lambda s: scale[(LIN, LOG)](scale[(LOG10, LIN)](s))
if (old_scale, new_scale) not in scale:
- raise NotImplementedError(f"Old scale: {old_scale}. New scale: {new_scale}.")
+ raise NotImplementedError(
+ f"Old scale: {old_scale}. New scale: {new_scale}."
+ )
return scale[(old_scale, new_scale)](sensitivity)
@@ -497,14 +508,19 @@ def create_parameter_mapping(
if parameter_mapping_kwargs is None:
parameter_mapping_kwargs = {}
- prelim_parameter_mapping = petab.get_optimization_to_simulation_parameter_mapping(
- condition_df=petab_problem.condition_df,
- measurement_df=petab_problem.measurement_df,
- parameter_df=petab_problem.parameter_df,
- observable_df=petab_problem.observable_df,
- mapping_df=petab_problem.mapping_df,
- model=petab_problem.model,
- **dict(default_parameter_mapping_kwargs, **parameter_mapping_kwargs),
+ prelim_parameter_mapping = (
+ petab.get_optimization_to_simulation_parameter_mapping(
+ condition_df=petab_problem.condition_df,
+ measurement_df=petab_problem.measurement_df,
+ parameter_df=petab_problem.parameter_df,
+ observable_df=petab_problem.observable_df,
+ mapping_df=petab_problem.mapping_df,
+ model=petab_problem.model,
+ simulation_conditions=simulation_conditions,
+ **dict(
+ default_parameter_mapping_kwargs, **parameter_mapping_kwargs
+ ),
+ )
)
parameter_mapping = ParameterMapping()
@@ -529,7 +545,8 @@ def _get_initial_state_sbml(
)
if initial_assignment:
initial_assignment = sp.sympify(
- libsbml.formulaToL3String(initial_assignment.getMath()), locals=_clash
+ libsbml.formulaToL3String(initial_assignment.getMath()),
+ locals=_clash,
)
if type_code == libsbml.SBML_SPECIES:
value = (
@@ -538,12 +555,21 @@ def _get_initial_state_sbml(
else initial_assignment
)
elif type_code == libsbml.SBML_PARAMETER:
- value = element.getValue() if initial_assignment is None else initial_assignment
+ value = (
+ element.getValue()
+ if initial_assignment is None
+ else initial_assignment
+ )
elif type_code == libsbml.SBML_COMPARTMENT:
- value = element.getSize() if initial_assignment is None else initial_assignment
+ value = (
+ element.getSize()
+ if initial_assignment is None
+ else initial_assignment
+ )
else:
raise NotImplementedError(
- f"Don't know what how to handle {element_id} in " "condition table."
+ f"Don't know what how to handle {element_id} in "
+ "condition table."
)
return value
@@ -559,7 +585,9 @@ def _get_initial_state_pysb(
(
initial.value
for initial in petab_problem.model.model.initials
- if match_complex_pattern(initial.pattern, species_pattern, exact=True)
+ if match_complex_pattern(
+ initial.pattern, species_pattern, exact=True
+ )
),
0.0,
)
@@ -616,9 +644,9 @@ def _set_initial_state(
scale_map[init_par_id] = petab.LIN
else:
# parametric initial state
- scale_map[init_par_id] = petab_problem.parameter_df[PARAMETER_SCALE].get(
- value, petab.LIN
- )
+ scale_map[init_par_id] = petab_problem.parameter_df[
+ PARAMETER_SCALE
+ ].get(value, petab.LIN)
def create_parameter_mapping_for_condition(
@@ -656,13 +684,17 @@ def create_parameter_mapping_for_condition(
condition_map_sim
) != len(condition_scale_map_sim):
raise AssertionError(
- "Number of parameters and number of parameter " "scales do not match."
+ "Number of parameters and number of parameter "
+ "scales do not match."
)
- if len(condition_map_preeq) and len(condition_map_preeq) != len(condition_map_sim):
+ if len(condition_map_preeq) and len(condition_map_preeq) != len(
+ condition_map_sim
+ ):
logger.debug(f"Preequilibration parameter map: {condition_map_preeq}")
logger.debug(f"Simulation parameter map: {condition_map_sim}")
raise AssertionError(
- "Number of parameters for preequilbration " "and simulation do not match."
+ "Number of parameters for preequilbration "
+ "and simulation do not match."
)
##########################################################################
@@ -690,7 +722,10 @@ def create_parameter_mapping_for_condition(
condition_map_sim[PREEQ_INDICATOR_ID] = 0.0
condition_scale_map_sim[PREEQ_INDICATOR_ID] = LIN
- for element_id, (value, preeq_value) in states_in_condition_table.items():
+ for element_id, (
+ value,
+ preeq_value,
+ ) in states_in_condition_table.items():
# for preequilibration
init_par_id = f"initial_{element_id}_preeq"
if (
@@ -738,7 +773,10 @@ def create_parameter_mapping_for_condition(
condition_map_preeq, variable_par_ids, fixed_par_ids
)
- condition_scale_map_preeq_var, condition_scale_map_preeq_fix = _subset_dict(
+ (
+ condition_scale_map_preeq_var,
+ condition_scale_map_preeq_fix,
+ ) = _subset_dict(
condition_scale_map_preeq, variable_par_ids, fixed_par_ids
)
@@ -750,9 +788,13 @@ def create_parameter_mapping_for_condition(
condition_scale_map_sim, variable_par_ids, fixed_par_ids
)
- logger.debug("Fixed parameters preequilibration: " f"{condition_map_preeq_fix}")
+ logger.debug(
+ "Fixed parameters preequilibration: " f"{condition_map_preeq_fix}"
+ )
logger.debug("Fixed parameters simulation: " f"{condition_map_sim_fix}")
- logger.debug("Variable parameters preequilibration: " f"{condition_map_preeq_var}")
+ logger.debug(
+ "Variable parameters preequilibration: " f"{condition_map_preeq_var}"
+ )
logger.debug("Variable parameters simulation: " f"{condition_map_sim_var}")
petab.merge_preeq_and_sim_pars_condition(
@@ -874,7 +916,10 @@ def create_edata_for_condition(
states_in_condition_table = get_states_in_condition_table(
petab_problem, condition=condition
)
- if condition.get(PREEQUILIBRATION_CONDITION_ID) and states_in_condition_table:
+ if (
+ condition.get(PREEQUILIBRATION_CONDITION_ID)
+ and states_in_condition_table
+ ):
state_ids = amici_model.getStateIds()
state_idx_reinitalization = [
state_ids.index(s)
@@ -893,7 +938,9 @@ def create_edata_for_condition(
# timepoints
# find replicate numbers of time points
- timepoints_w_reps = _get_timepoints_with_replicates(df_for_condition=measurement_df)
+ timepoints_w_reps = _get_timepoints_with_replicates(
+ df_for_condition=measurement_df
+ )
edata.setTimepoints(timepoints_w_reps)
##########################################################################
@@ -946,7 +993,9 @@ def _get_timepoints_with_replicates(
timepoints_w_reps = []
for time in timepoints:
# subselect for time
- df_for_time = df_for_condition[df_for_condition.time.astype(float) == time]
+ df_for_time = df_for_condition[
+ df_for_condition.time.astype(float) == time
+ ]
# rep number is maximum over rep numbers for observables
n_reps = max(df_for_time.groupby([OBSERVABLE_ID, TIME]).size())
# append time point n_rep times
@@ -979,7 +1028,9 @@ def _get_measurements_and_sigmas(
arrays for measurement and sigmas
"""
# prepare measurement matrix
- y = np.full(shape=(len(timepoints_w_reps), len(observable_ids)), fill_value=np.nan)
+ y = np.full(
+ shape=(len(timepoints_w_reps), len(observable_ids)), fill_value=np.nan
+ )
# prepare sigma matrix
sigma_y = y.copy()
@@ -1008,15 +1059,19 @@ def _get_measurements_and_sigmas(
y[time_ix_for_obs_ix[observable_ix], observable_ix] = measurement[
MEASUREMENT
]
- if isinstance(measurement.get(NOISE_PARAMETERS, None), numbers.Number):
- sigma_y[time_ix_for_obs_ix[observable_ix], observable_ix] = measurement[
- NOISE_PARAMETERS
- ]
+ if isinstance(
+ measurement.get(NOISE_PARAMETERS, None), numbers.Number
+ ):
+ sigma_y[
+ time_ix_for_obs_ix[observable_ix], observable_ix
+ ] = measurement[NOISE_PARAMETERS]
return y, sigma_y
def rdatas_to_measurement_df(
- rdatas: Sequence[amici.ReturnData], model: AmiciModel, measurement_df: pd.DataFrame
+ rdatas: Sequence[amici.ReturnData],
+ model: AmiciModel,
+ measurement_df: pd.DataFrame,
) -> pd.DataFrame:
"""
Create a measurement dataframe in the PEtab format from the passed
@@ -1047,7 +1102,9 @@ def rdatas_to_measurement_df(
t = list(rdata.ts)
# extract rows for condition
- cur_measurement_df = petab.get_rows_for_condition(measurement_df, condition)
+ cur_measurement_df = petab.get_rows_for_condition(
+ measurement_df, condition
+ )
# iterate over entries for the given condition
# note: this way we only generate a dataframe entry for every
@@ -1072,7 +1129,9 @@ def rdatas_to_measurement_df(
def rdatas_to_simulation_df(
- rdatas: Sequence[amici.ReturnData], model: AmiciModel, measurement_df: pd.DataFrame
+ rdatas: Sequence[amici.ReturnData],
+ model: AmiciModel,
+ measurement_df: pd.DataFrame,
) -> pd.DataFrame:
"""Create a PEtab simulation dataframe from
:class:`amici.amici.ReturnData` s.
diff --git a/python/sdist/amici/petab_simulate.py b/python/sdist/amici/petab_simulate.py
index d243a28b8b..32c1ef8955 100644
--- a/python/sdist/amici/petab_simulate.py
+++ b/python/sdist/amici/petab_simulate.py
@@ -18,7 +18,11 @@
import petab
from amici import AmiciModel, SensitivityMethod_none
from amici.petab_import import import_petab_problem
-from amici.petab_objective import RDATAS, rdatas_to_measurement_df, simulate_petab
+from amici.petab_objective import (
+ RDATAS,
+ rdatas_to_measurement_df,
+ simulate_petab,
+)
AMICI_MODEL = "amici_model"
AMICI_SOLVER = "solver"
@@ -49,7 +53,10 @@ def simulate_without_noise(self, **kwargs) -> pd.DataFrame:
in the Simulator constructor (including the PEtab problem).
"""
if AMICI_MODEL in {*kwargs, *dir(self)} and (
- any(k in kwargs for k in inspect.signature(import_petab_problem).parameters)
+ any(
+ k in kwargs
+ for k in inspect.signature(import_petab_problem).parameters
+ )
):
print(
"Arguments related to the PEtab import are unused if "
diff --git a/python/sdist/amici/petab_util.py b/python/sdist/amici/petab_util.py
index a9666d84ac..9108b108bc 100644
--- a/python/sdist/amici/petab_util.py
+++ b/python/sdist/amici/petab_util.py
@@ -27,7 +27,9 @@ def get_states_in_condition_table(
raise NotImplementedError()
species_check_funs = {
- MODEL_TYPE_SBML: lambda x: _element_is_sbml_state(petab_problem.sbml_model, x),
+ MODEL_TYPE_SBML: lambda x: _element_is_sbml_state(
+ petab_problem.sbml_model, x
+ ),
MODEL_TYPE_PYSB: lambda x: _element_is_pysb_pattern(
petab_problem.model.model, x
),
@@ -36,7 +38,9 @@ def get_states_in_condition_table(
resolve_mapping(petab_problem.mapping_df, col): (None, None)
if condition is None
else (
- petab_problem.condition_df.loc[condition[SIMULATION_CONDITION_ID], col],
+ petab_problem.condition_df.loc[
+ condition[SIMULATION_CONDITION_ID], col
+ ],
petab_problem.condition_df.loc[
condition[PREEQUILIBRATION_CONDITION_ID], col
]
@@ -60,7 +64,9 @@ def get_states_in_condition_table(
pysb.bng.generate_equations(petab_problem.model.model)
try:
- spm = pysb.pattern.SpeciesPatternMatcher(model=petab_problem.model.model)
+ spm = pysb.pattern.SpeciesPatternMatcher(
+ model=petab_problem.model.model
+ )
except NotImplementedError as e:
raise NotImplementedError(
"Requires https://github.com/pysb/pysb/pull/570. "
diff --git a/python/sdist/amici/plotting.py b/python/sdist/amici/plotting.py
index da718c1ec7..bd1f3a8ba1 100644
--- a/python/sdist/amici/plotting.py
+++ b/python/sdist/amici/plotting.py
@@ -3,7 +3,7 @@
--------
Plotting related functions
"""
-from typing import Iterable, Optional
+from typing import Iterable, Optional, Sequence, Union
import matplotlib.pyplot as plt
import pandas as pd
@@ -11,6 +11,7 @@
from matplotlib.axes import Axes
from . import Model, ReturnDataView
+from .numpy import StrOrExpr, evaluate
def plot_state_trajectories(
@@ -115,3 +116,26 @@ def plot_jacobian(rdata: ReturnDataView):
# backwards compatibility
plotStateTrajectories = plot_state_trajectories
plotObservableTrajectories = plot_observable_trajectories
+
+
+def plot_expressions(
+ exprs: Union[Sequence[StrOrExpr], StrOrExpr], rdata: ReturnDataView
+) -> None:
+ """Plot the given expressions evaluated on the given simulation outputs.
+
+ :param exprs:
+        A symbolic expression or sequence of expressions, e.g. sympy
+        expressions or strings that can be sympified. They can include
+        state variable, expression, and observable IDs, depending on
+        whether the respective data is available in the simulation
+        results.
+ Parameters are not yet supported.
+ :param rdata:
+ The simulation results.
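+
+    Example (``obs_a`` is a hypothetical observable ID; ``rdata`` is an
+    existing :class:`ReturnDataView`):
+
+    >>> plot_expressions(["obs_a", "2 * obs_a"], rdata)  # doctest: +SKIP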
+ """
+ if not isinstance(exprs, Sequence) or isinstance(exprs, str):
+ exprs = [exprs]
+
+ for expr in exprs:
+ plt.plot(rdata.t, evaluate(expr, rdata), label=str(expr))
+
+ plt.legend()
+ plt.gca().set_xlabel("$t$")
diff --git a/python/sdist/amici/pysb_import.py b/python/sdist/amici/pysb_import.py
index 7e413a2a88..aa1dc7cd9b 100644
--- a/python/sdist/amici/pysb_import.py
+++ b/python/sdist/amici/pysb_import.py
@@ -10,7 +10,17 @@
import os
import sys
from pathlib import Path
-from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+)
import numpy as np
import pysb
@@ -112,8 +122,8 @@ def pysb2amici(
errors
:param compiler:
- distutils/setuptools compiler selection to build the python
- extension
+ Absolute path to the compiler executable to be used to build the Python
+ extension, e.g. ``/usr/bin/clang``.
:param compute_conservation_laws:
if set to ``True``, conservation laws are automatically computed and
@@ -255,8 +265,12 @@ def ode_model_from_pysb_importer(
_process_pysb_parameters(model, ode, constant_parameters)
if compute_conservation_laws:
_process_pysb_conservation_laws(model, ode)
- _process_pysb_observables(model, ode, observables, sigmas, noise_distributions)
- _process_pysb_expressions(model, ode, observables, sigmas, noise_distributions)
+ _process_pysb_observables(
+ model, ode, observables, sigmas, noise_distributions
+ )
+ _process_pysb_expressions(
+ model, ode, observables, sigmas, noise_distributions
+ )
ode._has_quadratic_nllh = not noise_distributions or all(
noise_distr in ["normal", "lin-normal", "log-normal", "log10-normal"]
for noise_distr in noise_distributions.values()
@@ -382,7 +396,9 @@ def _process_pysb_species(pysb_model: pysb.Model, ode_model: DEModel) -> None:
for ix, specie in enumerate(pysb_model.species):
init = sp.sympify("0.0")
for ic in pysb_model.odes.model.initials:
- if pysb.pattern.match_complex_pattern(ic.pattern, specie, exact=True):
+ if pysb.pattern.match_complex_pattern(
+ ic.pattern, specie, exact=True
+ ):
# we don't want to allow expressions in initial conditions
if ic.value in pysb_model.expressions:
init = pysb_model.expressions[ic.value.name].expand_expr()
@@ -390,7 +406,9 @@ def _process_pysb_species(pysb_model: pysb.Model, ode_model: DEModel) -> None:
init = ic.value
ode_model.add_component(
- DifferentialState(sp.Symbol(f"__s{ix}"), f"{specie}", init, xdot[ix])
+ DifferentialState(
+ sp.Symbol(f"__s{ix}"), f"{specie}", init, xdot[ix]
+ )
)
logger.debug(f"Finished Processing PySB species ")
@@ -464,7 +482,8 @@ def _process_pysb_expressions(
include_derived=True
) | pysb_model.expressions_dynamic(include_derived=True):
if any(
- isinstance(symbol, pysb.Tag) for symbol in expr.expand_expr().free_symbols
+ isinstance(symbol, pysb.Tag)
+ for symbol in expr.expand_expr().free_symbols
):
# we only need explicit instantiations of expressions with tags,
# which are defined in the derived expressions. The abstract
@@ -521,11 +540,15 @@ def _add_expression(
:param ode_model:
see :py:func:`_process_pysb_expressions`
"""
- ode_model.add_component(Expression(sym, name, _parse_special_functions(expr)))
+ ode_model.add_component(
+ Expression(sym, name, _parse_special_functions(expr))
+ )
if name in observables:
noise_dist = (
- noise_distributions.get(name, "normal") if noise_distributions else "normal"
+ noise_distributions.get(name, "normal")
+ if noise_distributions
+ else "normal"
)
y = sp.Symbol(f"{name}")
@@ -533,7 +556,9 @@ def _add_expression(
obs = Observable(y, name, sym, transformation=trafo)
ode_model.add_component(obs)
- sigma_name, sigma_value = _get_sigma_name_and_value(pysb_model, name, sigmas)
+ sigma_name, sigma_value = _get_sigma_name_and_value(
+ pysb_model, name, sigmas
+ )
sigma = sp.Symbol(sigma_name)
ode_model.add_component(SigmaY(sigma, f"{sigma_name}", sigma_value))
@@ -542,10 +567,14 @@ def _add_expression(
my = generate_measurement_symbol(obs.get_id())
cost_fun_expr = sp.sympify(
cost_fun_str,
- locals=dict(zip(_get_str_symbol_identifiers(name), (y, my, sigma))),
+ locals=dict(
+ zip(_get_str_symbol_identifiers(name), (y, my, sigma))
+ ),
)
ode_model.add_component(
- LogLikelihoodY(sp.Symbol(f"llh_{name}"), f"llh_{name}", cost_fun_expr)
+ LogLikelihoodY(
+ sp.Symbol(f"llh_{name}"), f"llh_{name}", cost_fun_expr
+ )
)
@@ -575,7 +604,9 @@ def _get_sigma_name_and_value(
sigma_name = sigmas[obs_name]
try:
# find corresponding Expression instance
- sigma_expr = next(x for x in pysb_model.expressions if x.name == sigma_name)
+ sigma_expr = next(
+ x for x in pysb_model.expressions if x.name == sigma_name
+ )
except StopIteration:
raise ValueError(
f"value of sigma {obs_name} is not a " f"valid expression."
@@ -633,7 +664,9 @@ def _process_pysb_observables(
@log_execution_time("computing PySB conservation laws", logger)
-def _process_pysb_conservation_laws(pysb_model: pysb.Model, ode_model: DEModel) -> None:
+def _process_pysb_conservation_laws(
+ pysb_model: pysb.Model, ode_model: DEModel
+) -> None:
"""
Removes species according to conservation laws to ensure that the
jacobian has full rank
@@ -647,7 +680,9 @@ def _process_pysb_conservation_laws(pysb_model: pysb.Model, ode_model: DEModel)
monomers_without_conservation_law = set()
for rule in pysb_model.rules:
- monomers_without_conservation_law |= _get_unconserved_monomers(rule, pysb_model)
+ monomers_without_conservation_law |= _get_unconserved_monomers(
+ rule, pysb_model
+ )
monomers_without_conservation_law |= (
_compute_monomers_with_fixed_initial_conditions(pysb_model)
@@ -667,7 +702,9 @@ def _process_pysb_conservation_laws(pysb_model: pysb.Model, ode_model: DEModel)
ode_model.add_conservation_law(**cl)
-def _compute_monomers_with_fixed_initial_conditions(pysb_model: pysb.Model) -> Set[str]:
+def _compute_monomers_with_fixed_initial_conditions(
+ pysb_model: pysb.Model,
+) -> Set[str]:
"""
Computes the set of monomers in a model with species that have fixed
initial conditions
@@ -696,7 +733,9 @@ def _compute_monomers_with_fixed_initial_conditions(pysb_model: pysb.Model) -> S
def _generate_cl_prototypes(
- excluded_monomers: Iterable[str], pysb_model: pysb.Model, ode_model: DEModel
+ excluded_monomers: Iterable[str],
+ pysb_model: pysb.Model,
+ ode_model: DEModel,
) -> CL_Prototype:
"""
Constructs a dict that contains preprocessed information for the
@@ -717,7 +756,9 @@ def _generate_cl_prototypes(
"""
cl_prototypes = dict()
- _compute_possible_indices(cl_prototypes, pysb_model, ode_model, excluded_monomers)
+ _compute_possible_indices(
+ cl_prototypes, pysb_model, ode_model, excluded_monomers
+ )
_compute_dependency_idx(cl_prototypes)
_compute_target_index(cl_prototypes, ode_model)
@@ -825,7 +866,9 @@ def _compute_dependency_idx(cl_prototypes: CL_Prototype) -> None:
prototype_j["dependency_idx"][idx] |= {monomer_i}
-def _compute_target_index(cl_prototypes: CL_Prototype, ode_model: DEModel) -> None:
+def _compute_target_index(
+ cl_prototypes: CL_Prototype, ode_model: DEModel
+) -> None:
"""
Computes the target index for every monomer
@@ -885,7 +928,9 @@ def _compute_target_index(cl_prototypes: CL_Prototype, ode_model: DEModel) -> No
# multimers has a low upper bound and the species count does not
# vary too much across conservation laws, this approximation
# should be fine
- prototype["fillin"] = prototype["appearance_count"] * prototype["species_count"]
+ prototype["fillin"] = (
+ prototype["appearance_count"] * prototype["species_count"]
+ )
# we might end up with the same index for multiple monomers, so loop until
# we have a set of unique target indices
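# A worked example of the fill-in heuristic above (hypothetical counts):
# the estimated fill-in is simply the monomer's number of rule appearances
# times the number of species it occurs in.
prototype = {"appearance_count": 2, "species_count": 5}
prototype["fillin"] = (
    prototype["appearance_count"] * prototype["species_count"]
)
assert prototype["fillin"] == 10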
@@ -936,7 +981,9 @@ def _cl_has_cycle(monomer: str, cl_prototypes: CL_Prototype) -> bool:
root = monomer
return any(
_is_in_cycle(connecting_monomer, cl_prototypes, visited, root)
- for connecting_monomer in prototype["dependency_idx"][prototype["target_index"]]
+ for connecting_monomer in prototype["dependency_idx"][
+ prototype["target_index"]
+ ]
)
@@ -980,7 +1027,9 @@ def _is_in_cycle(
return any(
_is_in_cycle(connecting_monomer, cl_prototypes, visited, root)
- for connecting_monomer in prototype["dependency_idx"][prototype["target_index"]]
+ for connecting_monomer in prototype["dependency_idx"][
+ prototype["target_index"]
+ ]
)
@@ -997,9 +1046,9 @@ def _greedy_target_index_update(cl_prototypes: CL_Prototype) -> None:
target_indices = _get_target_indices(cl_prototypes)
for monomer, prototype in cl_prototypes.items():
- if target_indices.count(prototype["target_index"]) > 1 or _cl_has_cycle(
- monomer, cl_prototypes
- ):
+ if target_indices.count(
+ prototype["target_index"]
+ ) > 1 or _cl_has_cycle(monomer, cl_prototypes):
# compute how much fillin the next best target_index would yield
# we exclude already existing target indices to avoid that
@@ -1008,7 +1057,9 @@ def _greedy_target_index_update(cl_prototypes: CL_Prototype) -> None:
# solution but prevents infinite loops
for target_index in list(set(target_indices)):
try:
- local_idx = prototype["possible_indices"].index(target_index)
+ local_idx = prototype["possible_indices"].index(
+ target_index
+ )
except ValueError:
local_idx = None
@@ -1023,13 +1074,16 @@ def _greedy_target_index_update(cl_prototypes: CL_Prototype) -> None:
idx = np.argmin(prototype["appearance_counts"])
prototype["local_index"] = idx
- prototype["alternate_target_index"] = prototype["possible_indices"][idx]
- prototype["alternate_appearance_count"] = prototype["appearance_counts"][
- idx
- ]
+ prototype["alternate_target_index"] = prototype[
+ "possible_indices"
+ ][idx]
+ prototype["alternate_appearance_count"] = prototype[
+ "appearance_counts"
+ ][idx]
prototype["alternate_fillin"] = (
- prototype["alternate_appearance_count"] * prototype["species_count"]
+ prototype["alternate_appearance_count"]
+ * prototype["species_count"]
)
prototype["diff_fillin"] = (
@@ -1038,13 +1092,18 @@ def _greedy_target_index_update(cl_prototypes: CL_Prototype) -> None:
else:
prototype["diff_fillin"] = -1
- if all(prototype["diff_fillin"] == -1 for prototype in cl_prototypes.values()):
+ if all(
+ prototype["diff_fillin"] == -1 for prototype in cl_prototypes.values()
+ ):
raise RuntimeError(
- "Could not compute a valid set of conservation " "laws for this model!"
+ "Could not compute a valid set of conservation "
+ "laws for this model!"
)
# this puts prototypes with high diff_fillin last
- cl_prototypes = sorted(cl_prototypes.items(), key=lambda kv: kv[1]["diff_fillin"])
+ cl_prototypes = sorted(
+ cl_prototypes.items(), key=lambda kv: kv[1]["diff_fillin"]
+ )
cl_prototypes = {proto[0]: proto[1] for proto in cl_prototypes}
for monomer in cl_prototypes:
@@ -1058,12 +1117,15 @@ def _greedy_target_index_update(cl_prototypes: CL_Prototype) -> None:
# are recomputed on the fly)
if prototype["diff_fillin"] > -1 and (
- _get_target_indices(cl_prototypes).count(prototype["target_index"]) > 1
+ _get_target_indices(cl_prototypes).count(prototype["target_index"])
+ > 1
or _cl_has_cycle(monomer, cl_prototypes)
):
prototype["fillin"] = prototype["alternate_fillin"]
prototype["target_index"] = prototype["alternate_target_index"]
- prototype["appearance_count"] = prototype["alternate_appearance_count"]
+ prototype["appearance_count"] = prototype[
+ "alternate_appearance_count"
+ ]
del prototype["possible_indices"][prototype["local_index"]]
del prototype["appearance_counts"][prototype["local_index"]]
@@ -1146,7 +1208,9 @@ def _add_conservation_for_constant_species(
)
-def _flatten_conservation_laws(conservation_laws: List[ConservationLaw]) -> None:
+def _flatten_conservation_laws(
+ conservation_laws: List[ConservationLaw],
+) -> None:
"""
Flatten the conservation laws such that the state_expr no longer
depend on any states that are replaced by conservation laws
@@ -1160,9 +1224,12 @@ def _flatten_conservation_laws(conservation_laws: List[ConservationLaw]) -> None
for cl in conservation_laws:
# only update if we changed something
if any(
- _apply_conseration_law_sub(cl, sub) for sub in conservation_law_subs
+ _apply_conseration_law_sub(cl, sub)
+ for sub in conservation_law_subs
):
- conservation_law_subs = _get_conservation_law_subs(conservation_laws)
+ conservation_law_subs = _get_conservation_law_subs(
+ conservation_laws
+ )
def _apply_conseration_law_sub(
@@ -1245,7 +1312,9 @@ def _get_conservation_law_subs(
def has_fixed_parameter_ic(
- specie: pysb.core.ComplexPattern, pysb_model: pysb.Model, ode_model: DEModel
+ specie: pysb.core.ComplexPattern,
+ pysb_model: pysb.Model,
+ ode_model: DEModel,
) -> bool:
"""
Wrapper to interface
@@ -1271,7 +1340,9 @@ def has_fixed_parameter_ic(
(
ic
for ic, condition in enumerate(pysb_model.initials)
- if pysb.pattern.match_complex_pattern(condition[0], specie, exact=True)
+ if pysb.pattern.match_complex_pattern(
+ condition[0], specie, exact=True
+ )
),
None,
)
@@ -1304,7 +1375,9 @@ def extract_monomers(
]
-def _get_unconserved_monomers(rule: pysb.Rule, pysb_model: pysb.Model) -> Set[str]:
+def _get_unconserved_monomers(
+ rule: pysb.Rule, pysb_model: pysb.Model
+) -> Set[str]:
"""
Constructs the set of monomer names for which the specified rule changes
the stoichiometry of the monomer in the specified model.
@@ -1320,11 +1393,16 @@ def _get_unconserved_monomers(rule: pysb.Rule, pysb_model: pysb.Model) -> Set[st
"""
unconserved_monomers = set()
- if not rule.delete_molecules and len(rule.product_pattern.complex_patterns) == 0:
+ if (
+ not rule.delete_molecules
+ and len(rule.product_pattern.complex_patterns) == 0
+ ):
# if delete_molecules is not True but we have a degradation rule,
# we have to actually go through the reactions that are created by
# the rule
- for reaction in [r for r in pysb_model.reactions if rule.name in r["rule"]]:
+ for reaction in [
+ r for r in pysb_model.reactions if rule.name in r["rule"]
+ ]:
unconserved_monomers |= _get_changed_stoichiometries(
[pysb_model.species[ix] for ix in reaction["reactants"]],
[pysb_model.species[ix] for ix in reaction["products"]],
@@ -1377,7 +1455,9 @@ def pysb_model_from_path(pysb_model_file: Union[str, Path]) -> pysb.Model:
:return: The pysb Model instance
"""
- pysb_model_module_name = os.path.splitext(os.path.split(pysb_model_file)[-1])[0]
+ pysb_model_module_name = os.path.splitext(
+ os.path.split(pysb_model_file)[-1]
+ )[0]
import importlib.util
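# Sketch of what the reformatted expression above computes: the module
# name is the file's base name without its extension (hypothetical path).
import os

pysb_model_file = "models/egfr_simple.py"
pysb_model_module_name = os.path.splitext(
    os.path.split(pysb_model_file)[-1]
)[0]
assert pysb_model_module_name == "egfr_simple"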
diff --git a/python/sdist/amici/sbml_import.py b/python/sdist/amici/sbml_import.py
index 8c43d35cf2..dd24b98cf8 100644
--- a/python/sdist/amici/sbml_import.py
+++ b/python/sdist/amici/sbml_import.py
@@ -211,7 +211,9 @@ def _process_document(self) -> None:
"""
# Ensure we got a valid SBML model, otherwise further processing
# might lead to undefined results
- log_execution_time("validating SBML", logger)(self.sbml_doc.validateSBML)()
+ log_execution_time("validating SBML", logger)(
+ self.sbml_doc.validateSBML
+ )()
_check_lib_sbml_errors(self.sbml_doc, self.show_sbml_warnings)
# Flatten "comp" model? Do that before any other converters are run
@@ -251,7 +253,9 @@ def _process_document(self) -> None:
self.sbml_doc.convert
)(convert_config)
- convert_config = sbml.SBMLLocalParameterConverter().getDefaultProperties()
+ convert_config = (
+ sbml.SBMLLocalParameterConverter().getDefaultProperties()
+ )
log_execution_time("converting SBML local parameters", logger)(
self.sbml_doc.convert
)(convert_config)
@@ -365,8 +369,8 @@ def sbml2amici(
negative due to numerical errors
:param compiler:
- distutils/setuptools compiler selection to build the
- python extension
+ Absolute path to the compiler executable to be used to build the Python
+ extension, e.g. ``/usr/bin/clang``.
:param allow_reinit_fixpar_initcond:
see :class:`amici.de_export.ODEExporter`
@@ -471,7 +475,9 @@ def _build_ode_model(
See :py:func:`sbml2amici` for parameters.
"""
- constant_parameters = list(constant_parameters) if constant_parameters else []
+ constant_parameters = (
+ list(constant_parameters) if constant_parameters else []
+ )
hardcode_symbols = set(hardcode_symbols) if hardcode_symbols else {}
if invalid := (set(constant_parameters) & set(hardcode_symbols)):
@@ -494,10 +500,13 @@ def _build_ode_model(
self._reset_symbols()
self.sbml_parser_settings.setParseLog(
- sbml.L3P_PARSE_LOG_AS_LOG10 if log_as_log10 else sbml.L3P_PARSE_LOG_AS_LN
+ sbml.L3P_PARSE_LOG_AS_LOG10
+ if log_as_log10
+ else sbml.L3P_PARSE_LOG_AS_LN
)
self._process_sbml(
- constant_parameters=constant_parameters, hardcode_symbols=hardcode_symbols
+ constant_parameters=constant_parameters,
+ hardcode_symbols=hardcode_symbols,
)
if (
@@ -530,7 +539,9 @@ def _build_ode_model(
simplify=simplify,
cache_simplify=cache_simplify,
)
- ode_model.import_from_sbml_importer(self, compute_cls=compute_conservation_laws)
+ ode_model.import_from_sbml_importer(
+ self, compute_cls=compute_conservation_laws
+ )
return ode_model
@log_execution_time("importing SBML", logger)
@@ -552,7 +563,8 @@ def _process_sbml(
self.check_support()
self._gather_locals(hardcode_symbols=hardcode_symbols)
self._process_parameters(
- constant_parameters=constant_parameters, hardcode_symbols=hardcode_symbols
+ constant_parameters=constant_parameters,
+ hardcode_symbols=hardcode_symbols,
)
self._process_compartments()
self._process_species()
@@ -581,7 +593,10 @@ def check_support(self) -> None:
# the "required" attribute is only available in SBML Level 3
for i_plugin in range(self.sbml.getNumPlugins()):
plugin = self.sbml.getPlugin(i_plugin)
- if self.sbml_doc.getPkgRequired(plugin.getPackageName()) is False:
+ if (
+ self.sbml_doc.getPkgRequired(plugin.getPackageName())
+ is False
+ ):
# if not "required", this has no impact on model
# simulation, and we can safely ignore it
@@ -597,7 +612,9 @@ def check_support(self) -> None:
raise SBMLException(
"The following fbc extension elements are "
"currently not supported: "
- + ", ".join(list(map(str, plugin.getListOfAllElements())))
+ + ", ".join(
+ list(map(str, plugin.getListOfAllElements()))
+ )
)
continue
@@ -669,7 +686,8 @@ def check_event_support(self) -> None:
trigger_sbml = event.getTrigger()
if trigger_sbml is None:
logger.warning(
- f"Event {event_id} trigger has no trigger, " "so will be skipped."
+ f"Event {event_id} trigger has no trigger, "
+ "so will be skipped."
)
continue
if trigger_sbml.getMath() is None:
@@ -696,7 +714,9 @@ def _gather_locals(self, hardcode_symbols: Sequence[str] = None) -> None:
self._gather_base_locals(hardcode_symbols=hardcode_symbols)
self._gather_dependent_locals()
- def _gather_base_locals(self, hardcode_symbols: Sequence[str] = None) -> None:
+ def _gather_base_locals(
+ self, hardcode_symbols: Sequence[str] = None
+ ) -> None:
"""
Populate self.local_symbols with pure symbol definitions that do not
depend on any other symbol.
@@ -741,7 +761,10 @@ def _gather_base_locals(self, hardcode_symbols: Sequence[str] = None) -> None:
for x_ref in _get_list_of_species_references(self.sbml):
if not x_ref.isSetId():
continue
- if x_ref.isSetStoichiometry() and not self.is_assignment_rule_target(x_ref):
+ if (
+ x_ref.isSetStoichiometry()
+ and not self.is_assignment_rule_target(x_ref)
+ ):
value = sp.Float(x_ref.getStoichiometry())
else:
value = _get_identifier_symbol(x_ref)
@@ -761,7 +784,8 @@ def _gather_dependent_locals(self):
if not r.isSetId():
continue
self.add_local_symbol(
- r.getId(), self._sympy_from_sbml_math(r.getKineticLaw() or sp.Float(0))
+ r.getId(),
+ self._sympy_from_sbml_math(r.getKineticLaw() or sp.Float(0)),
)
def add_local_symbol(self, key: str, value: sp.Expr):
@@ -819,7 +843,9 @@ def _process_species(self) -> None:
Get species information from SBML model.
"""
if self.sbml.isSetConversionFactor():
- conversion_factor = symbol_with_assumptions(self.sbml.getConversionFactor())
+ conversion_factor = symbol_with_assumptions(
+ self.sbml.getConversionFactor()
+ )
else:
conversion_factor = 1
@@ -831,7 +857,9 @@ def _process_species(self) -> None:
"compartment": _get_species_compartment_symbol(s),
"constant": s.getConstant() or s.getBoundaryCondition(),
"amount": s.getHasOnlySubstanceUnits(),
- "conversion_factor": symbol_with_assumptions(s.getConversionFactor())
+ "conversion_factor": symbol_with_assumptions(
+ s.getConversionFactor()
+ )
if s.isSetConversionFactor()
else conversion_factor,
"index": len(self.symbols[SymbolId.SPECIES]),
@@ -856,7 +884,9 @@ def _process_species_initial(self):
# targets to have InitialAssignments.
species = self.symbols[SymbolId.SPECIES].get(species_id, None)
- ia_initial = self._get_element_initial_assignment(species_variable.getId())
+ ia_initial = self._get_element_initial_assignment(
+ species_variable.getId()
+ )
if ia_initial is not None:
initial = ia_initial
if species:
@@ -869,12 +899,15 @@ def _process_species_initial(self):
all_rateof_dummies.append(rateof_dummies)
# don't assign this since they need to stay in order
- sorted_species = toposort_symbols(self.symbols[SymbolId.SPECIES], "init")
+ sorted_species = toposort_symbols(
+ self.symbols[SymbolId.SPECIES], "init"
+ )
for species, rateof_dummies in zip(
self.symbols[SymbolId.SPECIES].values(), all_rateof_dummies
):
species["init"] = _dummy_to_rateof(
- smart_subs_dict(species["init"], sorted_species, "init"), rateof_dummies
+ smart_subs_dict(species["init"], sorted_species, "init"),
+ rateof_dummies,
)
@log_execution_time("processing SBML rate rules", logger)
@@ -961,7 +994,9 @@ def add_d_dt(
variable0 = smart_subs(variable0, species_id, species["init"])
for species in self.symbols[SymbolId.SPECIES].values():
- species["init"] = smart_subs(species["init"], variable, variable0)
+ species["init"] = smart_subs(
+ species["init"], variable, variable0
+ )
# add compartment/parameter species
self.symbols[SymbolId.SPECIES][variable] = {
@@ -1028,7 +1063,8 @@ def _process_parameters(
]
for parameter in fixed_parameters:
if (
- self._get_element_initial_assignment(parameter.getId()) is not None
+ self._get_element_initial_assignment(parameter.getId())
+ is not None
or self.is_assignment_rule_target(parameter)
or self.is_rate_rule_target(parameter)
):
@@ -1069,8 +1105,12 @@ def _process_parameters(
for par in self.sbml.getListOfParameters():
if (
ia := self._get_element_initial_assignment(par.getId())
- ) is not None and ia.find(sp.core.function.UndefinedFunction("rateOf")):
- self.symbols[SymbolId.EXPRESSION][_get_identifier_symbol(par)] = {
+ ) is not None and ia.find(
+ sp.core.function.UndefinedFunction("rateOf")
+ ):
+ self.symbols[SymbolId.EXPRESSION][
+ _get_identifier_symbol(par)
+ ] = {
"name": par.getName() if par.isSetName() else par.getId(),
"value": ia,
}
@@ -1123,9 +1163,9 @@ def _process_reactions(self):
# rate of change in species concentration) now occurs
# in the `dx_dt` method in "de_export.py", which also
# accounts for possibly variable compartments.
- self.stoichiometric_matrix[species["index"], reaction_index] += (
- sign * stoichiometry * species["conversion_factor"]
- )
+ self.stoichiometric_matrix[
+ species["index"], reaction_index
+ ] += (sign * stoichiometry * species["conversion_factor"])
if reaction.isSetId():
sym_math = self._local_symbols[reaction.getId()]
else:
@@ -1201,9 +1241,9 @@ def _process_rule_algebraic(self, rule: sbml.AlgebraicRule):
continue
# and there must also not be a rate rule or assignment
# rule for it
- if self.is_assignment_rule_target(sbml_var) or self.is_rate_rule_target(
+ if self.is_assignment_rule_target(
sbml_var
- ):
+ ) or self.is_rate_rule_target(sbml_var):
continue
# Furthermore, if the entity is a Species object, its value
# must not be determined by reactions, which means that it
@@ -1217,10 +1257,15 @@ def _process_rule_algebraic(self, rule: sbml.AlgebraicRule):
)
is_involved_in_reaction = is_species and not smart_is_zero_matrix(
self.stoichiometric_matrix[
- list(self.symbols[SymbolId.SPECIES].keys()).index(symbol), :
+ list(self.symbols[SymbolId.SPECIES].keys()).index(symbol),
+ :,
]
)
- if is_species and not is_boundary_condition and is_involved_in_reaction:
+ if (
+ is_species
+ and not is_boundary_condition
+ and is_involved_in_reaction
+ ):
continue
free_variables.add(symbol)
@@ -1270,14 +1315,22 @@ def _process_rule_algebraic(self, rule: sbml.AlgebraicRule):
symbol["init"] = sp.Float(symbol.pop("value"))
# if not a species, add a zeros row to the stoichiometric
# matrix
- if (isinstance(symbol["init"], float) and np.isnan(symbol["init"])) or (
- isinstance(symbol["init"], sp.Number) and symbol["init"] == sp.nan
+ if (
+ isinstance(symbol["init"], float)
+ and np.isnan(symbol["init"])
+ ) or (
+ isinstance(symbol["init"], sp.Number)
+ and symbol["init"] == sp.nan
):
# placeholder, needs to be determined in IC calculation
symbol["init"] = sp.Float(0.0)
- self.stoichiometric_matrix = self.stoichiometric_matrix.row_insert(
- self.stoichiometric_matrix.shape[0],
- sp.SparseMatrix([[0] * self.stoichiometric_matrix.shape[1]]),
+ self.stoichiometric_matrix = (
+ self.stoichiometric_matrix.row_insert(
+ self.stoichiometric_matrix.shape[0],
+ sp.SparseMatrix(
+ [[0] * self.stoichiometric_matrix.shape[1]]
+ ),
+ )
)
elif var_ix != self.stoichiometric_matrix.shape[0] - 1:
# if not the last col, move it to the end
@@ -1353,7 +1406,9 @@ def _convert_event_assignment_parameter_targets_to_species(self):
This is for the convenience of only implementing event assignments for
"species".
"""
- parameter_targets = _collect_event_assignment_parameter_targets(self.sbml)
+ parameter_targets = _collect_event_assignment_parameter_targets(
+ self.sbml
+ )
for parameter_target in parameter_targets:
# Parameter rate rules already exist as species.
if parameter_target in self.symbols[SymbolId.SPECIES]:
@@ -1374,7 +1429,9 @@ def _convert_event_assignment_parameter_targets_to_species(self):
"Unexpected error. The parameter target of an "
"event assignment was processed twice."
)
- parameter_def = self.symbols[symbol_id].pop(parameter_target)
+ parameter_def = self.symbols[symbol_id].pop(
+ parameter_target
+ )
if parameter_def is None:
# this happens for parameters that have initial assignments
# or are assignment rule targets
@@ -1382,7 +1439,9 @@ def _convert_event_assignment_parameter_targets_to_species(self):
ia_init = self._get_element_initial_assignment(par.getId())
parameter_def = {
"name": par.getName() if par.isSetName() else par.getId(),
- "value": sp.Float(par.getValue()) if ia_init is None else ia_init,
+ "value": sp.Float(par.getValue())
+ if ia_init is None
+ else ia_init,
}
# Fixed parameters are added as species such that they can be
# targets of events.
@@ -1423,9 +1482,9 @@ def get_empty_bolus_value() -> sp.Float:
# Species has a compartment
"compartment" in species_def
):
- concentration_species_by_compartment[species_def["compartment"]].append(
- species
- )
+ concentration_species_by_compartment[
+ species_def["compartment"]
+ ].append(species)
for ievent, event in enumerate(events):
# get the event id (which is optional unfortunately)
@@ -1448,7 +1507,9 @@ def get_empty_bolus_value() -> sp.Float:
event_assignments = event.getListOfEventAssignments()
compartment_event_assignments = set()
for event_assignment in event_assignments:
- variable_sym = symbol_with_assumptions(event_assignment.getVariable())
+ variable_sym = symbol_with_assumptions(
+ event_assignment.getVariable()
+ )
if event_assignment.getMath() is None:
# Ignore event assignments with no change in value.
continue
@@ -1477,7 +1538,10 @@ def get_empty_bolus_value() -> sp.Float:
if variable_sym in concentration_species_by_compartment:
compartment_event_assignments.add(variable_sym)
- for comp, assignment in self.compartment_assignment_rules.items():
+ for (
+ comp,
+ assignment,
+ ) in self.compartment_assignment_rules.items():
if variable_sym not in assignment.free_symbols:
continue
compartment_event_assignments.add(comp)
@@ -1510,10 +1574,14 @@ def get_empty_bolus_value() -> sp.Float:
for index in range(len(bolus)):
if bolus[index] != get_empty_bolus_value():
bolus[index] -= state_vector[index]
- bolus[index] = bolus[index].subs(get_empty_bolus_value(), sp.Float(0.0))
+ bolus[index] = bolus[index].subs(
+ get_empty_bolus_value(), sp.Float(0.0)
+ )
initial_value = (
- trigger_sbml.getInitialValue() if trigger_sbml is not None else True
+ trigger_sbml.getInitialValue()
+ if trigger_sbml is not None
+ else True
)
if self.symbols[SymbolId.ALGEBRAIC_EQUATION] and not initial_value:
# in principle this could be implemented, requires running
@@ -1559,7 +1627,9 @@ def _process_observables(
See :py:func:`sbml2amici`.
"""
- _validate_observables(observables, sigmas, noise_distributions, events=False)
+ _validate_observables(
+ observables, sigmas, noise_distributions, events=False
+ )
# add user-provided observables or make all species, and compartments
# with assignment rules, observable
@@ -1581,7 +1651,9 @@ def _process_observables(
# check for nesting of observables (unsupported)
observable_syms = set(self.symbols[SymbolId.OBSERVABLE].keys())
for obs in self.symbols[SymbolId.OBSERVABLE].values():
- if any(sym in observable_syms for sym in obs["value"].free_symbols):
+ if any(
+ sym in observable_syms for sym in obs["value"].free_symbols
+ ):
raise ValueError(
"Nested observables are not supported, "
f"but observable `{obs['name']} = {obs['value']}` "
@@ -1590,7 +1662,9 @@ def _process_observables(
elif observables is None:
self._generate_default_observables()
- _check_symbol_nesting(self.symbols[SymbolId.OBSERVABLE], "eventObservable")
+ _check_symbol_nesting(
+ self.symbols[SymbolId.OBSERVABLE], "eventObservable"
+ )
self._process_log_likelihood(sigmas, noise_distributions)
@@ -1618,14 +1692,20 @@ def _process_event_observables(
return
_validate_observables(
- event_observables, event_sigmas, event_noise_distributions, events=True
+ event_observables,
+ event_sigmas,
+ event_noise_distributions,
+ events=True,
)
# gather local symbols before parsing observable and sigma formulas
for obs, definition in event_observables.items():
self.add_local_symbol(obs, symbol_with_assumptions(obs))
# check corresponding event exists
- if sp.Symbol(definition["event"]) not in self.symbols[SymbolId.EVENT]:
+ if (
+ sp.Symbol(definition["event"])
+ not in self.symbols[SymbolId.EVENT]
+ ):
raise ValueError(
"Could not find an event with the event identifier "
f'{definition["event"]} for the event observable with name'
@@ -1663,7 +1743,10 @@ def _process_event_observables(
event_sigmas, event_noise_distributions, events=True
)
self._process_log_likelihood(
- event_sigmas, event_noise_distributions, events=True, event_reg=True
+ event_sigmas,
+ event_noise_distributions,
+ events=True,
+ event_reg=True,
)
def _generate_default_observables(self):
@@ -1755,14 +1838,17 @@ def _process_log_likelihood(
self.symbols[sigma_symbol] = {
symbol_with_assumptions(f"sigma_{obs_id}"): {
"name": f'sigma_{obs["name"]}',
- "value": self._sympy_from_sbml_math(sigmas.get(str(obs_id), "1.0")),
+ "value": self._sympy_from_sbml_math(
+ sigmas.get(str(obs_id), "1.0")
+ ),
}
for obs_id, obs in self.symbols[obs_symbol].items()
}
self.symbols[llh_symbol] = {}
for (obs_id, obs), (sigma_id, sigma) in zip(
- self.symbols[obs_symbol].items(), self.symbols[sigma_symbol].items()
+ self.symbols[obs_symbol].items(),
+ self.symbols[sigma_symbol].items(),
):
symbol = symbol_with_assumptions(f"J{obs_id}")
dist = noise_distributions.get(str(obs_id), "normal")
@@ -1805,7 +1891,9 @@ def _process_initial_assignments(self):
continue
sym_math = self._make_initial(
- smart_subs_dict(sym_math, self.symbols[SymbolId.EXPRESSION], "value")
+ smart_subs_dict(
+ sym_math, self.symbols[SymbolId.EXPRESSION], "value"
+ )
)
self.initial_assignments[_get_identifier_symbol(ia)] = sym_math
@@ -1868,7 +1956,9 @@ def _make_initial(
if "init" in species:
sym_math = smart_subs(sym_math, species_id, species["init"])
- sym_math = smart_subs(sym_math, self._local_symbols["time"], sp.Float(0))
+ sym_math = smart_subs(
+ sym_math, self._local_symbols["time"], sp.Float(0)
+ )
sym_math = _dummy_to_rateof(sym_math, rateof_to_dummy)
@@ -1904,7 +1994,9 @@ def process_conservation_laws(self, ode_model) -> None:
# add algebraic variables to species_solver as they were ignored above
ndifferential = len(ode_model._differential_states)
nalgebraic = len(ode_model._algebraic_states)
- species_solver.extend(list(range(ndifferential, ndifferential + nalgebraic)))
+ species_solver.extend(
+ list(range(ndifferential, ndifferential + nalgebraic))
+ )
# Check, whether species_solver is empty now. As currently, AMICI
# cannot handle ODEs without species, CLs must be switched off in this
@@ -1914,7 +2006,9 @@ def process_conservation_laws(self, ode_model) -> None:
species_solver = list(range(ode_model.num_states_rdata()))
# prune out species from stoichiometry and
- self.stoichiometric_matrix = self.stoichiometric_matrix[species_solver, :]
+ self.stoichiometric_matrix = self.stoichiometric_matrix[
+ species_solver, :
+ ]
# add the found CLs to the ode_model
for cl in conservation_laws:
@@ -1934,9 +2028,13 @@ def _get_conservation_laws_demartino(
quantity (including the eliminated one)
(2) coefficients for the species in (1)
"""
- from .conserved_quantities_demartino import compute_moiety_conservation_laws
+ from .conserved_quantities_demartino import (
+ compute_moiety_conservation_laws,
+ )
- sm = self.stoichiometric_matrix[: len(self.symbols[SymbolId.SPECIES]), :]
+ sm = self.stoichiometric_matrix[
+ : len(self.symbols[SymbolId.SPECIES]), :
+ ]
try:
stoichiometric_list = [float(entry) for entry in sm.T.flat()]
@@ -1959,7 +2057,9 @@ def _get_conservation_laws_demartino(
stoichiometric_list,
*sm.shape,
rng_seed=32,
- species_names=[str(x.get_id()) for x in ode_model._differential_states],
+ species_names=[
+ str(x.get_id()) for x in ode_model._differential_states
+ ],
)
# Sparsify conserved quantities
@@ -1971,7 +2071,9 @@ def _get_conservation_laws_demartino(
# `A * x0 = total_cl` and bring it to reduced row echelon form. The
# pivot species are the ones to be eliminated. The resulting state
# expressions are sparse and void of any circular dependencies.
- A = sp.zeros(len(cls_coefficients), len(ode_model._differential_states))
+ A = sp.zeros(
+ len(cls_coefficients), len(ode_model._differential_states)
+ )
for i_cl, (cl, coefficients) in enumerate(
zip(cls_state_idxs, cls_coefficients)
):
@@ -1989,7 +2091,9 @@ def _get_conservation_laws_demartino(
)
return raw_cls
- def _get_conservation_laws_rref(self) -> List[Tuple[int, List[int], List[float]]]:
+ def _get_conservation_laws_rref(
+ self,
+ ) -> List[Tuple[int, List[int], List[float]]]:
"""Identify conservation laws based on left nullspace of the
stoichiometric matrix, computed through (numeric) Gaussian elimination
@@ -2006,7 +2110,9 @@ def _get_conservation_laws_rref(self) -> List[Tuple[int, List[int], List[float]]
try:
S = np.asarray(
- self.stoichiometric_matrix[: len(self.symbols[SymbolId.SPECIES]), :],
+ self.stoichiometric_matrix[
+ : len(self.symbols[SymbolId.SPECIES]), :
+ ],
dtype=float,
)
except TypeError:
@@ -2202,10 +2308,15 @@ def _replace_in_all_expressions(
if old not in self.symbols[symbol]:
continue
self.symbols[symbol] = {
- smart_subs(k, old, new): v for k, v in self.symbols[symbol].items()
+ smart_subs(k, old, new): v
+ for k, v in self.symbols[symbol].items()
}
- for symbol in [SymbolId.OBSERVABLE, SymbolId.LLHY, SymbolId.SIGMAY]:
+ for symbol in [
+ SymbolId.OBSERVABLE,
+ SymbolId.LLHY,
+ SymbolId.SIGMAY,
+ ]:
if old not in self.symbols[symbol]:
continue
self.symbols[symbol][new] = self.symbols[symbol][old]
@@ -2238,7 +2349,9 @@ def _replace_in_all_expressions(
**self.symbols[SymbolId.SPECIES],
**self.symbols[SymbolId.ALGEBRAIC_STATE],
}.values():
- state["init"] = smart_subs(state["init"], old, self._make_initial(new))
+ state["init"] = smart_subs(
+ state["init"], old, self._make_initial(new)
+ )
if "dt" in state:
state["dt"] = smart_subs(state["dt"], old, new)
@@ -2299,7 +2412,8 @@ def _sympy_from_sbml_math(
try:
try:
formula = sp.sympify(
- _parse_logical_operators(math_string), locals=self._local_symbols
+ _parse_logical_operators(math_string),
+ locals=self._local_symbols,
)
except TypeError as err:
if str(err) == "BooleanAtom not allowed in this context.":
@@ -2322,10 +2436,14 @@ def _sympy_from_sbml_math(
if isinstance(formula, sp.Expr):
formula = _parse_special_functions_sbml(formula)
- _check_unsupported_functions_sbml(formula, expression_type=ele_name)
+ _check_unsupported_functions_sbml(
+ formula, expression_type=ele_name
+ )
return formula
- def _get_element_initial_assignment(self, element_id: str) -> Union[sp.Expr, None]:
+ def _get_element_initial_assignment(
+ self, element_id: str
+ ) -> Union[sp.Expr, None]:
"""
Extract value of sbml variable according to its initial assignment
@@ -2420,7 +2538,8 @@ def _check_lib_sbml_errors(
error = sbml_doc.getError(i_error)
# we ignore any info messages for now
if error.getSeverity() >= sbml.LIBSBML_SEV_ERROR or (
- show_warnings and error.getSeverity() >= sbml.LIBSBML_SEV_WARNING
+ show_warnings
+ and error.getSeverity() >= sbml.LIBSBML_SEV_WARNING
):
logger.error(
f"libSBML {error.getCategoryAsString()} "
@@ -2429,7 +2548,9 @@ def _check_lib_sbml_errors(
)
if num_error + num_fatal:
- raise SBMLException("SBML Document failed to load (see error messages above)")
+ raise SBMLException(
+ "SBML Document failed to load (see error messages above)"
+ )
def _parse_event_trigger(trigger: sp.Expr) -> sp.Expr:
@@ -2450,13 +2571,17 @@ def _parse_event_trigger(trigger: sp.Expr) -> sp.Expr:
# convert relational expressions into trigger functions
if isinstance(
- trigger, (sp.core.relational.LessThan, sp.core.relational.StrictLessThan)
+ trigger,
+ (sp.core.relational.LessThan, sp.core.relational.StrictLessThan),
):
# y < x or y <= x
return -root
if isinstance(
trigger,
- (sp.core.relational.GreaterThan, sp.core.relational.StrictGreaterThan),
+ (
+ sp.core.relational.GreaterThan,
+ sp.core.relational.StrictGreaterThan,
+ ),
):
# y >= x or y > x
return root
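# Illustration of the sign convention above, assuming the root function is
# built as lhs - rhs of the relational (an assumption; `root` is
# constructed outside this hunk): for a greater-than trigger the root
# becomes positive exactly when the trigger starts to hold.
import sympy as sp

x, y = sp.symbols("x y")
trigger = sp.GreaterThan(x, y)  # hypothetical trigger: x >= y
root = trigger.lhs - trigger.rhs
assert root.subs({x: 2, y: 1}) > 0  # trigger holds, root is positive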
@@ -2676,7 +2801,9 @@ def _check_unsupported_functions_sbml(
raise SBMLException(str(err))
-def _parse_special_functions_sbml(sym: sp.Expr, toplevel: bool = True) -> sp.Expr:
+def _parse_special_functions_sbml(
+ sym: sp.Expr, toplevel: bool = True
+) -> sp.Expr:
try:
return _parse_special_functions(sym, toplevel)
except RuntimeError as err:
diff --git a/python/sdist/amici/sbml_utils.py b/python/sdist/amici/sbml_utils.py
index cce2a6c4fa..66c9d01bbc 100644
--- a/python/sdist/amici/sbml_utils.py
+++ b/python/sdist/amici/sbml_utils.py
@@ -161,7 +161,9 @@ def add_species(
)
compartment_id = compartments[0].getId()
elif not model.getCompartment(compartment_id):
- raise SbmlMissingComponentIdError(f"No compartment with ID {compartment_id}.")
+ raise SbmlMissingComponentIdError(
+ f"No compartment with ID {compartment_id}."
+ )
sp = model.createSpecies()
if sp.setIdAttribute(species_id) != libsbml.LIBSBML_OPERATION_SUCCESS:
@@ -532,6 +534,8 @@ def _parse_logical_operators(
return math_str
if " xor(" in math_str or " Xor(" in math_str:
- raise SBMLException("Xor is currently not supported as logical " "operation.")
+ raise SBMLException(
+ "Xor is currently not supported as logical " "operation."
+ )
return (math_str.replace("&&", "&")).replace("||", "|")
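# Example of the operator rewriting above (hypothetical SBML formula):
# C-style logical operators are mapped to their sympy equivalents.
math_str = "(x > 0) && (y < 1) || (z == 2)"
converted = (math_str.replace("&&", "&")).replace("||", "|")
assert converted == "(x > 0) & (y < 1) | (z == 2)"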
diff --git a/python/sdist/amici/splines.py b/python/sdist/amici/splines.py
index bb82b692c6..fdb0912045 100644
--- a/python/sdist/amici/splines.py
+++ b/python/sdist/amici/splines.py
@@ -11,7 +11,17 @@
if TYPE_CHECKING:
from numbers import Real
- from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
+ from typing import (
+ Any,
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ Union,
+ )
from . import sbml_import
@@ -112,19 +122,25 @@ def __init__(
stop = sp.nsimplify(sp.sympify(stop))
if step is None:
if number_of_nodes is None:
- raise ValueError("One of step/number_of_nodes must be specified!")
+ raise ValueError(
+ "One of step/number_of_nodes must be specified!"
+ )
if not isinstance(number_of_nodes, Integral):
raise TypeError("number_of_nodes must be an integer!")
if number_of_nodes < 2:
raise ValueError("number_of_nodes must be at least 2!")
step = (stop - start) / (number_of_nodes - 1)
elif number_of_nodes is not None:
- raise ValueError("Only one of step/number_of_nodes can be specified!")
+ raise ValueError(
+ "Only one of step/number_of_nodes can be specified!"
+ )
else:
step = sp.nsimplify(sp.sympify(step))
if start > stop:
- raise ValueError(f"Start point {start} greater than stop point {stop}!")
+ raise ValueError(
+ f"Start point {start} greater than stop point {stop}!"
+ )
if step <= 0:
raise ValueError(f"Step size {step} must be strictly positive!")
@@ -181,7 +197,8 @@ def __array__(self, dtype=None) -> np.ndarray:
def __repr__(self) -> str:
return (
- f"UniformGrid(start={self.start}, stop={self.stop}, " f"step={self.step})"
+ f"UniformGrid(start={self.start}, stop={self.stop}, "
+ f"step={self.step})"
)
@@ -301,7 +318,10 @@ def __init__(
if not isinstance(evaluate_at, sp.Basic):
# It may still be e.g. a list!
raise ValueError(f"Invalid evaluate_at = {evaluate_at}!")
- if evaluate_at != amici_time_symbol and evaluate_at != sbml_time_symbol:
+ if (
+ evaluate_at != amici_time_symbol
+ and evaluate_at != sbml_time_symbol
+ ):
logger.warning(
"At the moment AMICI only supports evaluate_at = (model time). "
"Annotations with correct piecewise MathML formulas "
@@ -311,7 +331,9 @@ def __init__(
if not isinstance(nodes, UniformGrid):
nodes = np.asarray([sympify_noeval(x) for x in nodes])
- values_at_nodes = np.asarray([sympify_noeval(y) for y in values_at_nodes])
+ values_at_nodes = np.asarray(
+ [sympify_noeval(y) for y in values_at_nodes]
+ )
if len(nodes) != len(values_at_nodes):
raise ValueError(
@@ -333,7 +355,10 @@ def __init__(
)
bc, extrapolate = self._normalize_bc_and_extrapolate(bc, extrapolate)
- if bc == ("periodic", "periodic") and values_at_nodes[0] != values_at_nodes[-1]:
+ if (
+ bc == ("periodic", "periodic")
+ and values_at_nodes[0] != values_at_nodes[-1]
+ ):
raise ValueError(
"If the spline is to be periodic, "
"the first and last elements of values_at_nodes must be equal!"
@@ -554,15 +579,21 @@ def check_if_valid(self, importer: sbml_import.SbmlImporter) -> None:
fixed_parameters: List[sp.Symbol] = list(
importer.symbols[SymbolId.FIXED_PARAMETER].keys()
)
- species: List[sp.Symbol] = list(importer.symbols[SymbolId.SPECIES].keys())
+ species: List[sp.Symbol] = list(
+ importer.symbols[SymbolId.SPECIES].keys()
+ )
for x in self.nodes:
if not x.free_symbols.issubset(fixed_parameters):
- raise ValueError("nodes should only depend on constant parameters!")
+ raise ValueError(
+ "nodes should only depend on constant parameters!"
+ )
for y in self.values_at_nodes:
if y.free_symbols.intersection(species):
- raise ValueError("values_at_nodes should not depend on model species!")
+ raise ValueError(
+ "values_at_nodes should not depend on model species!"
+ )
fixed_parameters_values = [
importer.symbols[SymbolId.FIXED_PARAMETER][fp]["value"]
@@ -575,7 +606,9 @@ def check_if_valid(self, importer: sbml_import.SbmlImporter) -> None:
if not np.all(np.diff(nodes_values) > 0):
raise ValueError("nodes should be strictly increasing!")
- def poly(self, i: Integral, *, x: Union[Real, sp.Basic] = None) -> sp.Basic:
+ def poly(
+ self, i: Integral, *, x: Union[Real, sp.Basic] = None
+ ) -> sp.Basic:
"""
Get the polynomial interpolant on the ``(nodes[i], nodes[i+1])`` interval.
The polynomial is written in Horner form with respect to the scaled
@@ -623,7 +656,9 @@ def poly_variable(self, x: Union[Real, sp.Basic], i: Integral) -> sp.Basic:
return self._poly_variable(x, i)
@abstractmethod
- def _poly_variable(self, x: Union[Real, sp.Basic], i: Integral) -> sp.Basic:
+ def _poly_variable(
+ self, x: Union[Real, sp.Basic], i: Integral
+ ) -> sp.Basic:
"""This function (and not poly_variable) should be implemented by the
subclasses"""
raise NotImplementedError()
@@ -776,13 +811,17 @@ def _formula(
x = self._to_base_interval(x)
extr_left, extr_right = None, None
else:
- extr_left, extr_right = self._extrapolation_formulas(x, extrapolate)
+ extr_left, extr_right = self._extrapolation_formulas(
+ x, extrapolate
+ )
if extr_left is not None:
pieces.append((extr_left, x < self.nodes[0]))
for i in range(len(self.nodes) - 2):
- pieces.append((self.segment_formula(i, x=x), x < self.nodes[i + 1]))
+ pieces.append(
+ (self.segment_formula(i, x=x), x < self.nodes[i + 1])
+ )
if extr_right is not None:
pieces.append((self.segment_formula(-1, x=x), x < self.nodes[-1]))
@@ -818,7 +857,9 @@ def _to_base_interval(
"""For periodic splines, maps the real point `x` to the reference
period."""
if self.bc != ("periodic", "periodic"):
- raise ValueError("_to_base_interval makes no sense with non-periodic bc")
+ raise ValueError(
+ "_to_base_interval makes no sense with non-periodic bc"
+ )
xA = self.nodes[0]
xB = self.nodes[-1]
@@ -861,7 +902,9 @@ def squared_L2_norm_of_curvature(self) -> sp.Basic:
integral = sp.sympify(0)
for i in range(len(self.nodes) - 1):
formula = self.poly(i, x=x).diff(x, 2) ** 2
- integral += sp.integrate(formula, (x, self.nodes[i], self.nodes[i + 1]))
+ integral += sp.integrate(
+ formula, (x, self.nodes[i], self.nodes[i + 1])
+ )
return sp.simplify(integral)
def integrate(
@@ -892,7 +935,9 @@ def integrate(
return formula.integrate((x, z0, z1))
if k0 + 1 == k1:
- return formula.integrate((x, z0, xB)) + formula.integrate((x, xA, z1))
+ return formula.integrate((x, z0, xB)) + formula.integrate(
+ (x, xA, z1)
+ )
return (
formula.integrate((x, z0, xB))
@@ -969,7 +1014,9 @@ def _annotation_children(self) -> Dict[str, Union[str, List[str]]]:
assert amici_time_symbol not in x.free_symbols
children["spline_grid"] = [sbml_mathml(x) for x in self.nodes]
- children["spline_values"] = [sbml_mathml(y) for y in self.values_at_nodes]
+ children["spline_values"] = [
+ sbml_mathml(y) for y in self.values_at_nodes
+ ]
return children
@@ -1032,10 +1079,12 @@ def add_to_sbml_model(
# Autoadd parameters
if auto_add is True or auto_add == "spline":
- if not model.getParameter(str(self.sbml_id)) and not model.getSpecies(
+ if not model.getParameter(
str(self.sbml_id)
- ):
- add_parameter(model, self.sbml_id, constant=False, units=y_units)
+ ) and not model.getSpecies(str(self.sbml_id)):
+ add_parameter(
+ model, self.sbml_id, constant=False, units=y_units
+ )
if auto_add is True:
if isinstance(x_nominal, collections.abc.Sequence):
@@ -1046,7 +1095,9 @@ def add_to_sbml_model(
)
for i in range(len(x_nominal) - 1):
if x[i] >= x[i + 1]:
- raise ValueError("x_nominal must be strictly increasing!")
+ raise ValueError(
+ "x_nominal must be strictly increasing!"
+ )
elif x_nominal is None:
x_nominal = len(self.nodes) * [None]
else:
@@ -1055,7 +1106,9 @@ def add_to_sbml_model(
raise TypeError("x_nominal must be a Sequence!")
for _x, _val in zip(self.nodes, x_nominal):
if _x.is_Symbol and not model.getParameter(_x.name):
- add_parameter(model, _x.name, value=_val, units=x_units)
+ add_parameter(
+ model, _x.name, value=_val, units=x_units
+ )
if isinstance(y_nominal, collections.abc.Sequence):
if len(y_nominal) != len(self.values_at_nodes):
@@ -1109,7 +1162,9 @@ def add_to_sbml_model(
k = sp.Piecewise((3, sp.cos(s) < 0), (1, True))
formula = x0 + T * (sp.atan(sp.tan(s)) / (2 * sp.pi) + k / 4)
assert amici_time_symbol not in formula.free_symbols
- par = add_parameter(model, parameter_id, constant=False, units=x_units)
+ par = add_parameter(
+ model, parameter_id, constant=False, units=x_units
+ )
retcode = par.setAnnotation(
f''
)
@@ -1117,13 +1172,17 @@ def add_to_sbml_model(
raise SbmlAnnotationError("Could not set SBML annotation!")
add_assignment_rule(model, parameter_id, formula)
- def _replace_in_all_expressions(self, old: sp.Symbol, new: sp.Symbol) -> None:
+ def _replace_in_all_expressions(
+ self, old: sp.Symbol, new: sp.Symbol
+ ) -> None:
if self.sbml_id == old:
self._sbml_id = new
self._x = self.evaluate_at.subs(old, new)
if not isinstance(self.nodes, UniformGrid):
self._nodes = [x.subs(old, new) for x in self.nodes]
- self._values_at_nodes = [y.subs(old, new) for y in self.values_at_nodes]
+ self._values_at_nodes = [
+ y.subs(old, new) for y in self.values_at_nodes
+ ]
@staticmethod
def is_spline(rule: libsbml.AssignmentRule) -> bool:
@@ -1135,7 +1194,9 @@ def is_spline(rule: libsbml.AssignmentRule) -> bool:
return AbstractSpline.get_annotation(rule) is not None
@staticmethod
- def get_annotation(rule: libsbml.AssignmentRule) -> Union[ET.Element, None]:
+ def get_annotation(
+ rule: libsbml.AssignmentRule,
+ ) -> Union[ET.Element, None]:
"""
Extract AMICI spline annotation from an SBML assignment rule
(given as a :py:class:`libsbml.AssignmentRule` object).
@@ -1167,7 +1228,9 @@ def from_annotation(
must be hard-coded into this function here (at the moment).
"""
if annotation.tag != f"{{{annotation_namespace}}}spline":
- raise ValueError("The given annotation is not an AMICI spline annotation!")
+ raise ValueError(
+ "The given annotation is not an AMICI spline annotation!"
+ )
attributes = {}
for key, value in annotation.items():
@@ -1203,14 +1266,17 @@ def from_annotation(
if attributes["spline_method"] == "cubic_hermite":
cls = CubicHermiteSpline
else:
- raise ValueError(f"Unknown spline method {attributes['spline_method']}!")
+ raise ValueError(
+ f"Unknown spline method {attributes['spline_method']}!"
+ )
del attributes["spline_method"]
kwargs = cls._from_annotation(attributes, children)
if attributes:
raise ValueError(
- "Unprocessed attributes in spline annotation!\n" + str(attributes)
+ "Unprocessed attributes in spline annotation!\n"
+ + str(attributes)
)
if children:
@@ -1281,7 +1347,9 @@ def _from_annotation(
)
if "spline_values" not in children:
- raise ValueError("Required spline annotation 'spline_values' missing!")
+ raise ValueError(
+ "Required spline annotation 'spline_values' missing!"
+ )
kwargs["values_at_nodes"] = children.pop("spline_values")
return kwargs
@@ -1300,7 +1368,9 @@ def _parameters(self) -> Set[sp.Symbol]:
parameters.update(y.free_symbols)
return parameters
- def ode_model_symbol(self, importer: sbml_import.SbmlImporter) -> sp.Function:
+ def ode_model_symbol(
+ self, importer: sbml_import.SbmlImporter
+ ) -> sp.Function:
"""
Returns the `sympy` object to be used by
:py:class:`amici.de_export.ODEModel`.
@@ -1368,7 +1438,10 @@ def _eval_is_real(self):
return True
return AmiciSplineSensitivity(
- self.args[0], self.args[1], parameters[pindex], *self.args[2:]
+ self.args[0],
+ self.args[1],
+ parameters[pindex],
+ *self.args[2:],
)
def _eval_is_real(self):
@@ -1397,9 +1470,13 @@ def plot(
nodes = np.asarray(self.nodes)
xlim = (float(nodes[0]), float(nodes[-1]))
nodes = np.linspace(*xlim, npoints)
- ax.plot(nodes, [float(self.evaluate(x).subs(parameters)) for x in nodes])
ax.plot(
- self.nodes, [float(y.subs(parameters)) for y in self.values_at_nodes], "o"
+ nodes, [float(self.evaluate(x).subs(parameters)) for x in nodes]
+ )
+ ax.plot(
+ self.nodes,
+ [float(y.subs(parameters)) for y in self.values_at_nodes],
+ "o",
)
if xlabel is not None:
ax.set_xlabel(xlabel)
@@ -1500,7 +1577,9 @@ def __init__(
if not isinstance(nodes, UniformGrid):
nodes = np.asarray([sympify_noeval(x) for x in nodes])
- values_at_nodes = np.asarray([sympify_noeval(y) for y in values_at_nodes])
+ values_at_nodes = np.asarray(
+ [sympify_noeval(y) for y in values_at_nodes]
+ )
if len(nodes) != len(values_at_nodes):
# NB this would be checked in AbstractSpline.__init__()
@@ -1512,13 +1591,19 @@ def __init__(
)
bc, extrapolate = self._normalize_bc_and_extrapolate(bc, extrapolate)
- if bc[0] == "zeroderivative+natural" or bc[1] == "zeroderivative+natural":
+ if (
+ bc[0] == "zeroderivative+natural"
+ or bc[1] == "zeroderivative+natural"
+ ):
raise ValueError(
- "zeroderivative+natural bc not supported by " "CubicHermiteSplines!"
+ "zeroderivative+natural bc not supported by "
+ "CubicHermiteSplines!"
)
if derivatives_at_nodes is None:
- derivatives_at_nodes = _finite_differences(nodes, values_at_nodes, bc)
+ derivatives_at_nodes = _finite_differences(
+ nodes, values_at_nodes, bc
+ )
self._derivatives_by_fd = True
else:
derivatives_at_nodes = np.asarray(
@@ -1603,7 +1688,9 @@ def d_scaled(self, i: Integral) -> sp.Expr:
return self.derivatives_at_nodes[i] / self.values_at_nodes[i]
return self.derivatives_at_nodes[i]
- def _poly_variable(self, x: Union[Real, sp.Basic], i: Integral) -> sp.Basic:
+ def _poly_variable(
+ self, x: Union[Real, sp.Basic], i: Integral
+ ) -> sp.Basic:
assert 0 <= i < len(self.nodes) - 1
dx = self.nodes[i + 1] - self.nodes[i]
with evaluate(False):
@@ -1645,7 +1732,9 @@ def _parameters(self) -> Set[sp.Symbol]:
parameters.update(d.free_symbols)
return parameters
- def _replace_in_all_expressions(self, old: sp.Symbol, new: sp.Symbol) -> None:
+ def _replace_in_all_expressions(
+ self, old: sp.Symbol, new: sp.Symbol
+ ) -> None:
super()._replace_in_all_expressions(old, new)
self._derivatives_at_nodes = [
d.subs(old, new) for d in self.derivatives_at_nodes
@@ -1680,7 +1769,9 @@ def __str__(self) -> str:
return s + " [" + ", ".join(cmps) + "]"
-def _finite_differences(xx: np.ndarray, yy: np.ndarray, bc: NormalizedBC) -> np.ndarray:
+def _finite_differences(
+ xx: np.ndarray, yy: np.ndarray, bc: NormalizedBC
+) -> np.ndarray:
dd = []
if bc[0] == "periodic":
@@ -1701,7 +1792,11 @@ def _finite_differences(xx: np.ndarray, yy: np.ndarray, bc: NormalizedBC) -> np.
for i in range(1, len(xx) - 1):
dd.append(
_centered_fd(
- yy[i - 1], yy[i], yy[i + 1], xx[i] - xx[i - 1], xx[i + 1] - xx[i]
+ yy[i - 1],
+ yy[i],
+ yy[i + 1],
+ xx[i] - xx[i - 1],
+ xx[i + 1] - xx[i],
)
)
@@ -1715,7 +1810,9 @@ def _finite_differences(xx: np.ndarray, yy: np.ndarray, bc: NormalizedBC) -> np.
"At least 3 nodes are needed "
"for computing finite differences with natural bc!"
)
- fd = _natural_fd(yy[-1], xx[-2] - xx[-1], yy[-2], xx[-3] - xx[-2], yy[-3])
+ fd = _natural_fd(
+ yy[-1], xx[-2] - xx[-1], yy[-2], xx[-3] - xx[-2], yy[-3]
+ )
else:
fd = _onesided_fd(yy[-2], yy[-1], xx[-1] - xx[-2])
dd.append(fd)
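# A plausible centered difference on a non-uniform grid, shown only as an
# assumption about what `_centered_fd` computes (the helper itself is not
# part of this diff): weight the one-sided slopes by the opposite interval
# lengths, which reduces to (y_next - y_prev) / (2 * dx) on uniform grids.
def centered_fd(y_prev, y_mid, y_next, dx_left, dx_right):
    left_slope = (y_mid - y_prev) / dx_left
    right_slope = (y_next - y_mid) / dx_right
    return (dx_right * left_slope + dx_left * right_slope) / (
        dx_left + dx_right
    )

assert centered_fd(0.0, 1.0, 2.0, 1.0, 1.0) == 1.0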
diff --git a/python/sdist/amici/swig.py b/python/sdist/amici/swig.py
index bfb2964a3a..ef75646389 100644
--- a/python/sdist/amici/swig.py
+++ b/python/sdist/amici/swig.py
@@ -16,21 +16,29 @@ class TypeHintFixer(ast.NodeTransformer):
"size_t": ast.Name("int"),
"bool": ast.Name("bool"),
"std::unique_ptr< amici::Solver >": ast.Constant("Solver"),
- "amici::InternalSensitivityMethod": ast.Constant("InternalSensitivityMethod"),
+ "amici::InternalSensitivityMethod": ast.Constant(
+ "InternalSensitivityMethod"
+ ),
"amici::InterpolationType": ast.Constant("InterpolationType"),
"amici::LinearMultistepMethod": ast.Constant("LinearMultistepMethod"),
"amici::LinearSolver": ast.Constant("LinearSolver"),
"amici::Model *": ast.Constant("Model"),
"amici::Model const *": ast.Constant("Model"),
- "amici::NewtonDampingFactorMode": ast.Constant("NewtonDampingFactorMode"),
- "amici::NonlinearSolverIteration": ast.Constant("NonlinearSolverIteration"),
+ "amici::NewtonDampingFactorMode": ast.Constant(
+ "NewtonDampingFactorMode"
+ ),
+ "amici::NonlinearSolverIteration": ast.Constant(
+ "NonlinearSolverIteration"
+ ),
"amici::ObservableScaling": ast.Constant("ObservableScaling"),
"amici::ParameterScaling": ast.Constant("ParameterScaling"),
"amici::RDataReporting": ast.Constant("RDataReporting"),
"amici::SensitivityMethod": ast.Constant("SensitivityMethod"),
"amici::SensitivityOrder": ast.Constant("SensitivityOrder"),
"amici::Solver *": ast.Constant("Solver"),
- "amici::SteadyStateSensitivityMode": ast.Constant("SteadyStateSensitivityMode"),
+ "amici::SteadyStateSensitivityMode": ast.Constant(
+ "SteadyStateSensitivityMode"
+ ),
"amici::realtype": ast.Name("float"),
"DoubleVector": ast.Constant("Sequence[float]"),
"IntVector": ast.Name("Sequence[int]"),
@@ -72,7 +80,9 @@ def _new_annot(self, old_annot: str):
# std::vector value type
if (
value_type := re.sub(
- r"std::vector< (.*) >::value_type(?: const &)?", r"\1", old_annot
+ r"std::vector< (.*) >::value_type(?: const &)?",
+ r"\1",
+ old_annot,
)
) in self.mapping:
return self.mapping[value_type]
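# The value_type regex above, applied to a hypothetical SWIG annotation:
import re

old_annot = "std::vector< amici::realtype >::value_type const &"
value_type = re.sub(
    r"std::vector< (.*) >::value_type(?: const &)?", r"\1", old_annot
)
assert value_type == "amici::realtype"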
diff --git a/python/sdist/amici/swig_wrappers.py b/python/sdist/amici/swig_wrappers.py
index 50e78daf39..f56f3bd5d2 100644
--- a/python/sdist/amici/swig_wrappers.py
+++ b/python/sdist/amici/swig_wrappers.py
@@ -84,7 +84,9 @@ def _get_ptr(
def runAmiciSimulation(
- model: AmiciModel, solver: AmiciSolver, edata: Optional[AmiciExpData] = None
+ model: AmiciModel,
+ solver: AmiciSolver,
+ edata: Optional[AmiciExpData] = None,
) -> "numpy.ReturnDataView":
"""
Convenience wrapper around :py:func:`amici.amici.runAmiciSimulation`
@@ -105,7 +107,8 @@ def runAmiciSimulation(
"""
if (
model.ne > 0
- and solver.getSensitivityMethod() == amici_swig.SensitivityMethod.adjoint
+ and solver.getSensitivityMethod()
+ == amici_swig.SensitivityMethod.adjoint
and solver.getSensitivityOrder() == amici_swig.SensitivityOrder.first
):
warnings.warn(
@@ -166,7 +169,8 @@ def runAmiciSimulations(
"""
if (
model.ne > 0
- and solver.getSensitivityMethod() == amici_swig.SensitivityMethod.adjoint
+ and solver.getSensitivityMethod()
+ == amici_swig.SensitivityMethod.adjoint
and solver.getSensitivityOrder() == amici_swig.SensitivityOrder.first
):
warnings.warn(
@@ -309,7 +313,9 @@ def _log_simulation(rdata: amici_swig.ReturnData):
)
-def _ids_and_names_to_rdata(rdata: amici_swig.ReturnData, model: amici_swig.Model):
+def _ids_and_names_to_rdata(
+ rdata: amici_swig.ReturnData, model: amici_swig.Model
+):
"""Copy entity IDs and names from a Model to ReturnData."""
for entity_type in (
"State",
diff --git a/python/sdist/pyproject.toml b/python/sdist/pyproject.toml
index 3e77875ca1..011064fbdb 100644
--- a/python/sdist/pyproject.toml
+++ b/python/sdist/pyproject.toml
@@ -14,4 +14,4 @@ requires = [
build-backend = "setuptools.build_meta"
[tool.black]
-line-length = 80
+line-length = 79
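# The single functional change in this diff: black's line length drops
# from 80 to 79, which is what produces every rewrapping hunk above. A
# sketch of how to reproduce the reformat (assuming black is run directly;
# the project may instead invoke it via pre-commit):
#
#     black --line-length 79 python/sdist python/tests
#
# With the [tool.black] setting above, plain `black` picks up the same
# limit from pyproject.toml.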
diff --git a/python/tests/conftest.py b/python/tests/conftest.py
index 1c07ddacac..9ab64b91d7 100644
--- a/python/tests/conftest.py
+++ b/python/tests/conftest.py
@@ -41,7 +41,9 @@ def sbml_example_presimulation_module():
constant_parameters=constant_parameters,
)
- yield amici.import_model_module(module_name=module_name, module_path=outdir)
+ yield amici.import_model_module(
+ module_name=module_name, module_path=outdir
+ )
@pytest.fixture(scope="session")
diff --git a/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py b/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py
index 4723d3ac36..767c239c5d 100644
--- a/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py
+++ b/python/tests/pysb_test_models/bngwiki_egfr_simple_deletemolecules.py
@@ -76,7 +76,9 @@
)
# Transphosphorylation of EGFR by RTK
-Rule("egfr_transphos", EGFR(CR1=ANY, Y1068="U") >> EGFR(CR1=ANY, Y1068="P"), kp3)
+Rule(
+ "egfr_transphos", EGFR(CR1=ANY, Y1068="U") >> EGFR(CR1=ANY, Y1068="P"), kp3
+)
# Dephosphorylation
Rule("egfr_dephos", EGFR(Y1068="P") >> EGFR(Y1068="U"), km3)
diff --git a/python/tests/splines_utils.py b/python/tests/splines_utils.py
index e1c0c4352c..0746207ddb 100644
--- a/python/tests/splines_utils.py
+++ b/python/tests/splines_utils.py
@@ -267,7 +267,9 @@ def create_petab_problem(
problem.to_files(
model_file=os.path.join(folder, f"{model_name}_model.xml"),
condition_file=os.path.join(folder, f"{model_name}_conditions.tsv"),
- measurement_file=os.path.join(folder, f"{model_name}_measurements.tsv"),
+ measurement_file=os.path.join(
+ folder, f"{model_name}_measurements.tsv"
+ ),
parameter_file=os.path.join(folder, f"{model_name}_parameters.tsv"),
observable_file=os.path.join(folder, f"{model_name}_observables.tsv"),
yaml_file=os.path.join(folder, f"{model_name}.yaml"),
@@ -370,15 +372,24 @@ def simulate_splines(
)
if petab_problem is None and amici_model is not None:
- raise ValueError("if amici_model is given, petab_problem must be given too")
+ raise ValueError(
+ "if amici_model is given, petab_problem must be given too"
+ )
if petab_problem is not None and initial_values is None:
- raise ValueError("if petab_problem is given, initial_values must be given too")
+ raise ValueError(
+ "if petab_problem is given, initial_values must be given too"
+ )
if petab_problem is None:
# Create PEtab problem
path, initial_values, T = create_petab_problem(
- splines, params_true, initial_values, sigma=0.0, folder=folder, **kwargs
+ splines,
+ params_true,
+ initial_values,
+ sigma=0.0,
+ folder=folder,
+ **kwargs,
)
petab_problem = petab.Problem.from_yaml(path)
@@ -462,14 +473,18 @@ def simulate_splines(
)
-def compute_ground_truth(splines, initial_values, times, params_true, params_sorted):
+def compute_ground_truth(
+ splines, initial_values, times, params_true, params_sorted
+):
x_true_sym = sp.Matrix(
[
integrate_spline(spline, None, times, iv)
for (spline, iv) in zip(splines, initial_values)
]
).transpose()
- groundtruth = {"x_true": np.asarray(x_true_sym.subs(params_true), dtype=float)}
+ groundtruth = {
+ "x_true": np.asarray(x_true_sym.subs(params_true), dtype=float)
+ }
sx_by_state = [
x_true_sym[:, i].jacobian(params_sorted).subs(params_true)
for i in range(x_true_sym.shape[1])
@@ -567,7 +582,9 @@ def check_splines(
# Sort splines/ics/parameters as in the AMICI model
splines = [splines[species_to_index(name)] for name in state_ids]
- initial_values = [initial_values[species_to_index(name)] for name in state_ids]
+ initial_values = [
+ initial_values[species_to_index(name)] for name in state_ids
+ ]
def param_by_name(id):
for p in params_true.keys():
@@ -667,7 +684,9 @@ def param_by_name(id):
)
elif debug == "print":
sx_err_abs = abs(rdata["sx"] - sx_true)
- sx_err_rel = np.where(sx_err_abs == 0, 0, sx_err_abs / abs(sx_true))
+ sx_err_rel = np.where(
+ sx_err_abs == 0, 0, sx_err_abs / abs(sx_true)
+ )
print(f"sx_atol={sx_atol} sx_rtol={sx_rtol}")
print("sx_err_abs:")
print(np.squeeze(sx_err_abs))
@@ -696,7 +715,9 @@ def param_by_name(id):
if sllh_atol is None:
sllh_atol = np.finfo(float).eps
sllh_err_abs = abs(sllh).max()
- if (sllh_err_abs > sllh_atol and debug is not True) or debug == "print":
+ if (
+ sllh_err_abs > sllh_atol and debug is not True
+ ) or debug == "print":
print(f"sllh_atol={sllh_atol}")
print(f"sllh_err_abs = {sllh_err_abs}")
if not debug:
@@ -705,7 +726,11 @@ def param_by_name(id):
assert sllh is None
# Try different parameter lists
- if not skip_sensitivity and (not use_adjoint) and parameter_lists is not None:
+ if (
+ not skip_sensitivity
+ and (not use_adjoint)
+ and parameter_lists is not None
+ ):
for plist in parameter_lists:
amici_model.setParameterList(plist)
amici_model.setTimepoints(rdata.t)
@@ -884,7 +909,11 @@ def example_spline_1(
params[yy[i]] = yy_true[i]
spline = CubicHermiteSpline(
- f"y{idx}", nodes=xx, values_at_nodes=yy, bc=None, extrapolate=extrapolate
+ f"y{idx}",
+ nodes=xx,
+ values_at_nodes=yy,
+ bc=None,
+ extrapolate=extrapolate,
)
if os.name == "nt":
@@ -911,7 +940,11 @@ def example_spline_2(idx: int = 0):
yy.append(yy[0])
params = dict(zip(yy, yy_true))
spline = CubicHermiteSpline(
- f"y{idx}", nodes=xx, values_at_nodes=yy, bc="periodic", extrapolate="periodic"
+ f"y{idx}",
+ nodes=xx,
+ values_at_nodes=yy,
+ bc="periodic",
+ extrapolate="periodic",
)
tols = (
dict(llh_rtol=1e-15),
diff --git a/python/tests/test_compare_conservation_laws_sbml.py b/python/tests/test_compare_conservation_laws_sbml.py
index 79a26fd948..4d6a453b52 100644
--- a/python/tests/test_compare_conservation_laws_sbml.py
+++ b/python/tests/test_compare_conservation_laws_sbml.py
@@ -28,7 +28,9 @@ def edata_fixture():
2,
0,
0,
- np.array([0.0, 0.0, 0.0, 1.0, 2.0, 2.0, 4.0, float("inf"), float("inf")]),
+ np.array(
+ [0.0, 0.0, 0.0, 1.0, 2.0, 2.0, 4.0, float("inf"), float("inf")]
+ ),
)
edata_full.setObservedData([3.14] * 18)
edata_full.fixedParameters = np.array([1.0, 2.0])
@@ -129,7 +131,9 @@ def test_compare_conservation_laws_sbml(models, edata_fixture):
assert model_without_cl.nx_rdata == model_with_cl.nx_rdata
assert model_with_cl.nx_solver < model_without_cl.nx_solver
assert len(model_with_cl.getStateIdsSolver()) == model_with_cl.nx_solver
- assert len(model_without_cl.getStateIdsSolver()) == model_without_cl.nx_solver
+ assert (
+ len(model_without_cl.getStateIdsSolver()) == model_without_cl.nx_solver
+ )
# ----- compare simulations wo edata, sensi = 0, states ------------------
# run simulations
@@ -140,7 +144,11 @@ def test_compare_conservation_laws_sbml(models, edata_fixture):
# compare state trajectories
assert_allclose(
- rdata["x"], rdata_cl["x"], rtol=1.0e-5, atol=1.0e-8, err_msg="rdata.x mismatch"
+ rdata["x"],
+ rdata_cl["x"],
+ rtol=1.0e-5,
+ atol=1.0e-8,
+ err_msg="rdata.x mismatch",
)
# ----- compare simulations wo edata, sensi = 1, states and sensis -------
@@ -254,9 +262,15 @@ def test_adjoint_pre_and_post_equilibration(models, edata_fixture):
)
# assert all are close
- assert_allclose(rff_cl["sllh"], rfa_cl["sllh"], rtol=1.0e-5, atol=1.0e-8)
- assert_allclose(rfa_cl["sllh"], raa_cl["sllh"], rtol=1.0e-5, atol=1.0e-8)
- assert_allclose(raa_cl["sllh"], rff_cl["sllh"], rtol=1.0e-5, atol=1.0e-8)
+ assert_allclose(
+ rff_cl["sllh"], rfa_cl["sllh"], rtol=1.0e-5, atol=1.0e-8
+ )
+ assert_allclose(
+ rfa_cl["sllh"], raa_cl["sllh"], rtol=1.0e-5, atol=1.0e-8
+ )
+ assert_allclose(
+ raa_cl["sllh"], rff_cl["sllh"], rtol=1.0e-5, atol=1.0e-8
+ )
# compare fully adjoint approach to simulation with singular
# Jacobian
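
All three pairwise `sllh` comparisons use `numpy.testing.assert_allclose`, which passes elementwise when `|actual - desired| <= atol + rtol * |desired|`; a self-contained illustration of the tolerances used throughout this diff:

```python
import numpy as np
from numpy.testing import assert_allclose

desired = np.array([1.0, 2.0])
actual = desired * (1 + 5e-6)  # relative error 5e-6, within rtol=1e-5
assert_allclose(actual, desired, rtol=1.0e-5, atol=1.0e-8)
```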
diff --git a/python/tests/test_conserved_quantities_demartino.py b/python/tests/test_conserved_quantities_demartino.py
index 3c67d0145a..339743cc4e 100644
--- a/python/tests/test_conserved_quantities_demartino.py
+++ b/python/tests/test_conserved_quantities_demartino.py
@@ -7,7 +7,9 @@
import sympy as sp
from amici.conserved_quantities_demartino import _fill, _kernel
from amici.conserved_quantities_demartino import _output as output
-from amici.conserved_quantities_demartino import compute_moiety_conservation_laws
+from amici.conserved_quantities_demartino import (
+ compute_moiety_conservation_laws,
+)
from amici.logging import get_logger, log_execution_time
from amici.testing import skip_on_valgrind
@@ -165,7 +167,8 @@ def data_demartino2014():
S = [
int(item)
for sl in [
- entry.decode("ascii").strip().split("\t") for entry in data.readlines()
+ entry.decode("ascii").strip().split("\t")
+ for entry in data.readlines()
]
for item in sl
]
@@ -175,7 +178,9 @@ def data_demartino2014():
r"https://github.com/AMICI-dev/AMICI/files/11430970/test-ecoli-met.txt",
timeout=10,
)
- row_names = [entry.decode("ascii").strip() for entry in io.BytesIO(response.read())]
+ row_names = [
+ entry.decode("ascii").strip() for entry in io.BytesIO(response.read())
+ ]
return S, row_names
@@ -192,7 +197,9 @@ def test_kernel_demartino2014(data_demartino2014, quiet=True):
), "Unexpected dimension of stoichiometric matrix"
# Expected number of metabolites per conservation law found after kernel()
- expected_num_species = [53] + [2] * 11 + [6] + [3] * 2 + [2] * 15 + [3] + [2] * 5
+ expected_num_species = (
+ [53] + [2] * 11 + [6] + [3] * 2 + [2] * 15 + [3] + [2] * 5
+ )
(
kernel_dim,
@@ -220,7 +227,9 @@ def test_kernel_demartino2014(data_demartino2014, quiet=True):
assert (
engaged_species == demartino2014_kernel_engaged_species
), "Wrong engaged metabolites reported"
- assert len(conserved_moieties) == 128, "Wrong number of conserved moieties reported"
+ assert (
+ len(conserved_moieties) == 128
+ ), "Wrong number of conserved moieties reported"
# Assert that each conserved moiety has the correct number of metabolites
for i in range(int_kernel_dim - 2):
@@ -768,7 +777,9 @@ def test_fill_demartino2014(data_demartino2014):
assert not any(fields[len(ref_for_fields) :])
-def compute_moiety_conservation_laws_demartino2014(data_demartino2014, quiet=False):
+def compute_moiety_conservation_laws_demartino2014(
+ data_demartino2014, quiet=False
+):
"""Compute conserved quantities for De Martino's published results
for E. coli network"""
stoichiometric_list, row_names = data_demartino2014
@@ -781,7 +792,9 @@ def compute_moiety_conservation_laws_demartino2014(data_demartino2014, quiet=Fal
start = perf_counter()
cls_state_idxs, cls_coefficients = compute_moiety_conservation_laws(
- stoichiometric_list, num_species=num_species, num_reactions=num_reactions
+ stoichiometric_list,
+ num_species=num_species,
+ num_reactions=num_reactions,
)
runtime = perf_counter() - start
if not quiet:
@@ -795,7 +808,9 @@ def compute_moiety_conservation_laws_demartino2014(data_demartino2014, quiet=Fal
def test_compute_moiety_conservation_laws_demartino2014(data_demartino2014):
"""Invoke test case and benchmarking for De Martino's published results
for E. coli network"""
- compute_moiety_conservation_laws_demartino2014(data_demartino2014, quiet=False)
+ compute_moiety_conservation_laws_demartino2014(
+ data_demartino2014, quiet=False
+ )
@skip_on_valgrind
@@ -826,7 +841,9 @@ def test_compute_moiety_conservation_laws_simple():
stoichiometric_matrix = sp.Matrix(
[[-1.0, 1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, -1.0]]
)
- stoichiometric_list = [float(entry) for entry in stoichiometric_matrix.T.flat()]
+ stoichiometric_list = [
+ float(entry) for entry in stoichiometric_matrix.T.flat()
+ ]
num_tries = 1000
found_all_n_times = 0
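
For orientation, a single call to the API this test benchmarks, using the same 4-species/2-reaction toy network as `test_compute_moiety_conservation_laws_simple` above (a sketch; the flat list is produced exactly as in that test):

```python
import sympy as sp
from amici.conserved_quantities_demartino import (
    compute_moiety_conservation_laws,
)

# A + B <-> C + D: quantities such as [A] + [C] are conserved
S = sp.Matrix([[-1.0, 1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, -1.0]])
stoichiometric_list = [float(entry) for entry in S.T.flat()]
cls_state_idxs, cls_coefficients = compute_moiety_conservation_laws(
    stoichiometric_list, num_species=4, num_reactions=2
)
```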
diff --git a/python/tests/test_edata.py b/python/tests/test_edata.py
index c2d2ea470b..9c4d9b9edc 100644
--- a/python/tests/test_edata.py
+++ b/python/tests/test_edata.py
@@ -16,7 +16,9 @@ def test_edata_sensi_unscaling(model_units_module):
sx0 = (3, 3, 3, 3)
- parameter_scales_log10 = [amici.ParameterScaling.log10.value] * len(parameters0)
+ parameter_scales_log10 = [amici.ParameterScaling.log10.value] * len(
+ parameters0
+ )
amici_parameter_scales_log10 = amici.parameterScalingFromIntVector(
parameter_scales_log10
)
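
The scaling setup being reflowed here, as a standalone sketch (`model` would be an instance of a compiled AMICI model; `setParameterScale` is assumed to be the matching setter):

```python
import amici

scales = amici.parameterScalingFromIntVector(
    [amici.ParameterScaling.log10.value] * 4  # four log10-scaled parameters
)
# model.setParameterScale(scales)
```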
diff --git a/python/tests/test_events.py b/python/tests/test_events.py
index c562f3c4fc..d2a177bded 100644
--- a/python/tests/test_events.py
+++ b/python/tests/test_events.py
@@ -15,10 +15,15 @@
@pytest.fixture(
params=[
pytest.param("events_plus_heavisides", marks=skip_on_valgrind),
- pytest.param("piecewise_plus_event_simple_case", marks=skip_on_valgrind),
- pytest.param("piecewise_plus_event_semi_complicated", marks=skip_on_valgrind),
pytest.param(
- "piecewise_plus_event_trigger_depends_on_state", marks=skip_on_valgrind
+ "piecewise_plus_event_simple_case", marks=skip_on_valgrind
+ ),
+ pytest.param(
+ "piecewise_plus_event_semi_complicated", marks=skip_on_valgrind
+ ),
+ pytest.param(
+ "piecewise_plus_event_trigger_depends_on_state",
+ marks=skip_on_valgrind,
),
pytest.param("nested_events", marks=skip_on_valgrind),
pytest.param("event_state_dep_ddeltax_dtpx", marks=skip_on_valgrind),
@@ -72,7 +77,9 @@ def get_model_definition(model_name):
if model_name == "event_state_dep_ddeltax_dtpx":
return model_definition_event_state_dep_ddeltax_dtpx()
- raise NotImplementedError(f"Model with name {model_name} is not implemented.")
+ raise NotImplementedError(
+ f"Model with name {model_name} is not implemented."
+ )
def model_definition_events_plus_heavisides():
@@ -289,7 +296,9 @@ def x_expected(t, k1, k2, inflow_1, decay_1, decay_2, bolus):
def get_early_x(t):
# compute dynamics before event
- x_1 = equil * (1 - np.exp(-decay_1 * t)) + k1 * np.exp(-decay_1 * t)
+ x_1 = equil * (1 - np.exp(-decay_1 * t)) + k1 * np.exp(
+ -decay_1 * t
+ )
x_2 = k2 * np.exp(-decay_2 * t)
return np.array([[x_1], [x_2]])
@@ -303,9 +312,9 @@ def get_early_x(t):
# compute dynamics after event
inhom = np.exp(decay_1 * event_time) * tau_x1
- x_1 = equil * (1 - np.exp(decay_1 * (event_time - t))) + inhom * np.exp(
- -decay_1 * t
- )
+ x_1 = equil * (
+ 1 - np.exp(decay_1 * (event_time - t))
+ ) + inhom * np.exp(-decay_1 * t)
x_2 = tau_x2 * np.exp(decay_2 * event_time) * np.exp(-decay_2 * t)
x = np.array([[x_1], [x_2]])
@@ -360,7 +369,11 @@ def model_definition_piecewise_plus_event_simple_case():
}
timepoints = np.linspace(0.0, 5.0, 100) # np.array((0.0, 4.0,))
events = {
- "event_1": {"trigger": "time > alpha", "target": "x_1", "assignment": "gamma"},
+ "event_1": {
+ "trigger": "time > alpha",
+ "target": "x_1",
+ "assignment": "gamma",
+ },
"event_2": {
"trigger": "time > beta",
"target": "x_1",
@@ -458,7 +471,8 @@ def x_expected(t, x_1_0, alpha, beta, gamma, delta):
else:
# after third event triggered
x = (
- ((x_1_0 + alpha) * alpha + (beta - alpha)) * delta + (gamma - beta)
+ ((x_1_0 + alpha) * alpha + (beta - alpha)) * delta
+ + (gamma - beta)
) ** 2 * 2 + (t - gamma)
return np.array((x,))
@@ -541,7 +555,9 @@ def x_expected(t, x_1_0, x_2_0, alpha, beta, gamma, delta, eta):
x_1 = x_1_heaviside_1 * np.exp(delta * (t - heaviside_1))
else:
x_1_heaviside_1 = gamma * np.exp(-(heaviside_1 - t_event_1))
- x_1_at_event_2 = x_1_heaviside_1 * np.exp(delta * (t_event_2 - heaviside_1))
+ x_1_at_event_2 = x_1_heaviside_1 * np.exp(
+ delta * (t_event_2 - heaviside_1)
+ )
x_2_at_event_2 = x_2_0 * np.exp(-eta * t_event_2)
x1_after_event_2 = x_1_at_event_2 + x_2_at_event_2
x_1 = x1_after_event_2 * np.exp(-(t - t_event_2))
@@ -666,8 +682,12 @@ def sx_expected(t, parameters):
def test_models(model):
amici_model, parameters, timepoints, x_expected, sx_expected = model
- result_expected_x = np.array([x_expected(t, **parameters) for t in timepoints])
- result_expected_sx = np.array([sx_expected(t, parameters) for t in timepoints])
+ result_expected_x = np.array(
+ [x_expected(t, **parameters) for t in timepoints]
+ )
+ result_expected_sx = np.array(
+ [sx_expected(t, parameters) for t in timepoints]
+ )
# assert correctness of trajectories
check_trajectories_without_sensitivities(amici_model, result_expected_x)
diff --git a/python/tests/test_hdf5.py b/python/tests/test_hdf5.py
index c47d8653eb..232f22be8c 100644
--- a/python/tests/test_hdf5.py
+++ b/python/tests/test_hdf5.py
@@ -32,7 +32,9 @@ def _modify_solver_attrs(solver):
getattr(solver, attr)(cval)
-@pytest.mark.skipif(not amici.hdf5_enabled, reason="AMICI was compiled without HDF5")
+@pytest.mark.skipif(
+ not amici.hdf5_enabled, reason="AMICI was compiled without HDF5"
+)
def test_solver_hdf5_roundtrip(sbml_example_presimulation_module):
"""TestCase class for AMICI HDF5 I/O"""
diff --git a/python/tests/test_heavisides.py b/python/tests/test_heavisides.py
index 4cef7723a6..c3bea26a0c 100644
--- a/python/tests/test_heavisides.py
+++ b/python/tests/test_heavisides.py
@@ -53,8 +53,12 @@ def model(request):
def test_models(model):
amici_model, parameters, timepoints, x_expected, sx_expected = model
- result_expected_x = np.array([x_expected(t, **parameters) for t in timepoints])
- result_expected_sx = np.array([sx_expected(t, **parameters) for t in timepoints])
+ result_expected_x = np.array(
+ [x_expected(t, **parameters) for t in timepoints]
+ )
+ result_expected_sx = np.array(
+ [sx_expected(t, **parameters) for t in timepoints]
+ )
# Does the AMICI simulation match the analytical solution?
check_trajectories_without_sensitivities(amici_model, result_expected_x)
@@ -71,7 +75,9 @@ def get_model_definition(model_name):
elif model_name == "piecewise_many_conditions":
return model_definition_piecewise_many_conditions()
else:
- raise NotImplementedError(f"Model with name {model_name} is not implemented.")
+ raise NotImplementedError(
+ f"Model with name {model_name} is not implemented."
+ )
def model_definition_state_and_parameter_dependent_heavisides():
@@ -136,8 +142,12 @@ def sx_expected(t, alpha, beta, gamma, delta, eta, zeta):
sx_1_zeta = np.exp(alpha * t)
else:
# Never trust Wolfram Alpha...
- sx_1_alpha = zeta * tau_1 * np.exp(alpha * tau_1 - beta * (t - tau_1))
- sx_1_beta = zeta * (tau_1 - t) * np.exp(alpha * tau_1 - beta * (t - tau_1))
+ sx_1_alpha = (
+ zeta * tau_1 * np.exp(alpha * tau_1 - beta * (t - tau_1))
+ )
+ sx_1_beta = (
+ zeta * (tau_1 - t) * np.exp(alpha * tau_1 - beta * (t - tau_1))
+ )
sx_1_gamma = (
zeta
* (alpha + beta)
@@ -176,8 +186,22 @@ def sx_expected(t, alpha, beta, gamma, delta, eta, zeta):
sx_2_delta = gamma * np.exp(gamma * delta) - eta
sx_2_eta = t - delta
- sx_1 = (sx_1_alpha, sx_1_beta, sx_1_gamma, sx_1_delta, sx_1_eta, sx_1_zeta)
- sx_2 = (sx_2_alpha, sx_2_beta, sx_2_gamma, sx_2_delta, sx_2_eta, sx_2_zeta)
+ sx_1 = (
+ sx_1_alpha,
+ sx_1_beta,
+ sx_1_gamma,
+ sx_1_delta,
+ sx_1_eta,
+ sx_1_zeta,
+ )
+ sx_2 = (
+ sx_2_alpha,
+ sx_2_beta,
+ sx_2_gamma,
+ sx_2_delta,
+ sx_2_eta,
+ sx_2_zeta,
+ )
return np.array((sx_1, sx_2)).transpose()
diff --git a/python/tests/test_misc.py b/python/tests/test_misc.py
index 331c806623..5a88fda6f8 100644
--- a/python/tests/test_misc.py
+++ b/python/tests/test_misc.py
@@ -7,7 +7,11 @@
import amici
import pytest
import sympy as sp
-from amici.de_export import _custom_pow_eval_derivative, _monkeypatched, smart_subs_dict
+from amici.de_export import (
+ _custom_pow_eval_derivative,
+ _monkeypatched,
+ smart_subs_dict,
+)
from amici.testing import skip_on_valgrind
@@ -71,7 +75,11 @@ def test_cmake_compilation(sbml_example_presimulation_module):
try:
subprocess.run(
- cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ cmd,
+ shell=True,
+ check=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
)
except subprocess.CalledProcessError as e:
print(e.stdout.decode())
@@ -111,7 +119,9 @@ def test_monkeypatch():
assert (t**n).diff(t).subs(vals) is sp.nan
# check that we can monkeypatch it out
- with _monkeypatched(sp.Pow, "_eval_derivative", _custom_pow_eval_derivative):
+ with _monkeypatched(
+ sp.Pow, "_eval_derivative", _custom_pow_eval_derivative
+ ):
assert (t**n).diff(t).subs(vals) is not sp.nan
# check that the monkeypatch is transient
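
A generic sketch of what a `_monkeypatched` context manager of this kind does (AMICI's actual implementation may differ in details); the `finally` block is what makes the patch transient:

```python
from contextlib import contextmanager

@contextmanager
def monkeypatched(obj, attr, replacement):
    original = getattr(obj, attr)
    setattr(obj, attr, replacement)
    try:
        yield
    finally:
        setattr(obj, attr, original)  # restored even if the body raises
```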
diff --git a/python/tests/test_observable_events.py b/python/tests/test_observable_events.py
index 83a7b94c7e..2887308ff6 100644
--- a/python/tests/test_observable_events.py
+++ b/python/tests/test_observable_events.py
@@ -62,7 +62,9 @@ def model_neuron_def():
}
}
- event_observables = {"z1": {"name": "z1", "event": "event_1", "formula": "time"}}
+ event_observables = {
+ "z1": {"name": "z1", "event": "event_1", "formula": "time"}
+ }
return (
initial_assignments,
parameters,
@@ -210,7 +212,9 @@ def run_test_cases(model):
edata = None
if "data" in expected_results[model.getName()][case].keys():
edata = amici.readSimulationExpData(
- str(expected_results_file), f"/{model_name}/{case}/data", model.get()
+ str(expected_results_file),
+ f"/{model_name}/{case}/data",
+ model.get(),
)
rdata = amici.runAmiciSimulation(model, solver, edata)
diff --git a/python/tests/test_pandas.py b/python/tests/test_pandas.py
index e904fce7cc..21c58bcaff 100644
--- a/python/tests/test_pandas.py
+++ b/python/tests/test_pandas.py
@@ -49,8 +49,12 @@ def test_pandas_import_export(sbml_example_presimulation_module, case):
assert case[fp] == getattr(edata_reconstructed[0], fp)
else:
- assert model.getFixedParameters() == getattr(edata_reconstructed[0], fp)
+ assert model.getFixedParameters() == getattr(
+ edata_reconstructed[0], fp
+ )
- assert model.getFixedParameters() == getattr(edata_reconstructed[0], fp)
+ assert model.getFixedParameters() == getattr(
+ edata_reconstructed[0], fp
+ )
assert getattr(edata[0], fp) == case[fp]
diff --git a/python/tests/test_parameter_mapping.py b/python/tests/test_parameter_mapping.py
index 32ddf93103..ae66e23f53 100644
--- a/python/tests/test_parameter_mapping.py
+++ b/python/tests/test_parameter_mapping.py
@@ -2,7 +2,10 @@
import os
import pytest
-from amici.parameter_mapping import ParameterMapping, ParameterMappingForCondition
+from amici.parameter_mapping import (
+ ParameterMapping,
+ ParameterMappingForCondition,
+)
from amici.testing import skip_on_valgrind
@@ -25,16 +28,25 @@ def test_parameter_mapping_for_condition_default_args():
map_preeq_fix = {"sim_par2": "opt_par1"}
map_sim_fix = {"sim_par2": "opt_par2"}
par_map_for_condition = ParameterMappingForCondition(
- map_sim_var=map_sim_var, map_preeq_fix=map_preeq_fix, map_sim_fix=map_sim_fix
+ map_sim_var=map_sim_var,
+ map_preeq_fix=map_preeq_fix,
+ map_sim_fix=map_sim_fix,
)
expected_scale_map_sim_var = {"sim_par0": "lin", "sim_par1": "lin"}
expected_scale_map_preeq_fix = {"sim_par2": "lin"}
expected_scale_map_sim_fix = {"sim_par2": "lin"}
- assert par_map_for_condition.scale_map_sim_var == expected_scale_map_sim_var
- assert par_map_for_condition.scale_map_preeq_fix == expected_scale_map_preeq_fix
- assert par_map_for_condition.scale_map_sim_fix == expected_scale_map_sim_fix
+ assert (
+ par_map_for_condition.scale_map_sim_var == expected_scale_map_sim_var
+ )
+ assert (
+ par_map_for_condition.scale_map_preeq_fix
+ == expected_scale_map_preeq_fix
+ )
+ assert (
+ par_map_for_condition.scale_map_sim_fix == expected_scale_map_sim_fix
+ )
@skip_on_valgrind
@@ -48,7 +60,9 @@ def test_parameter_mapping():
map_preeq_fix = {"sim_par2": "opt_par1"}
map_sim_fix = {"sim_par2": "opt_par2"}
par_map_for_condition = ParameterMappingForCondition(
- map_sim_var=map_sim_var, map_preeq_fix=map_preeq_fix, map_sim_fix=map_sim_fix
+ map_sim_var=map_sim_var,
+ map_preeq_fix=map_preeq_fix,
+ map_sim_fix=map_sim_fix,
)
parameter_mapping.append(par_map_for_condition)
diff --git a/python/tests/test_petab_import.py b/python/tests/test_petab_import.py
index f6db30f18a..fc978b76ea 100644
--- a/python/tests/test_petab_import.py
+++ b/python/tests/test_petab_import.py
@@ -60,7 +60,9 @@ def test_get_fixed_parameters(simple_sbml_model):
)
)
parameter_df = petab.get_parameter_df(
- pd.DataFrame({petab.PARAMETER_ID: ["p3", "p4"], petab.ESTIMATE: [0, 1]})
+ pd.DataFrame(
+ {petab.PARAMETER_ID: ["p3", "p4"], petab.ESTIMATE: [0, 1]}
+ )
)
print(condition_df)
print(parameter_df)
@@ -122,7 +124,9 @@ def test_default_output_parameters(simple_sbml_model):
)
assert (
1.0
- == sbml_importer.sbml.getParameter("observableParameter1_obs1").getValue()
+ == sbml_importer.sbml.getParameter(
+ "observableParameter1_obs1"
+ ).getValue()
)
with pytest.raises(ValueError):
diff --git a/python/tests/test_petab_objective.py b/python/tests/test_petab_objective.py
index 1b2436ceab..e31e693d11 100755
--- a/python/tests/test_petab_objective.py
+++ b/python/tests/test_petab_objective.py
@@ -50,7 +50,9 @@ def test_simulate_petab_sensitivities(lotka_volterra):
for scaled_gradients in [True, False]:
_problem_parameters = problem_parameters.copy()
if scaled_parameters:
- _problem_parameters = petab_problem.scale_parameters(problem_parameters)
+ _problem_parameters = petab_problem.scale_parameters(
+ problem_parameters
+ )
results[(scaled_parameters, scaled_gradients)] = pd.Series(
amici.petab_objective.simulate_petab(
petab_problem=petab_problem,
diff --git a/python/tests/test_petab_simulate.py b/python/tests/test_petab_simulate.py
index 385f98e05e..febea5fd50 100644
--- a/python/tests/test_petab_simulate.py
+++ b/python/tests/test_petab_simulate.py
@@ -53,7 +53,9 @@ def test_subset_call(petab_problem):
simulator0 = PetabSimulator(petab_problem)
assert not (Path(model_output_dir) / model_name).is_dir()
- simulator0.simulate(model_name=model_name, model_output_dir=model_output_dir)
+ simulator0.simulate(
+ model_name=model_name, model_output_dir=model_output_dir
+ )
# Model name is handled correctly
assert simulator0.amici_model.getName() == model_name
# Check model output directory is created, by
diff --git a/python/tests/test_preequilibration.py b/python/tests/test_preequilibration.py
index d797c4bf3b..a42bc6354d 100644
--- a/python/tests/test_preequilibration.py
+++ b/python/tests/test_preequilibration.py
@@ -70,7 +70,16 @@ def preeq_fixture(pysb_example_presimulation_module):
[1, 1, 1],
]
- return (model, solver, edata, edata_preeq, edata_presim, edata_sim, pscales, plists)
+ return (
+ model,
+ solver,
+ edata,
+ edata_preeq,
+ edata_presim,
+ edata_sim,
+ pscales,
+ plists,
+ )
def test_manual_preequilibration(preeq_fixture):
@@ -133,7 +142,9 @@ def test_manual_preequilibration(preeq_fixture):
rdata_sim[variable],
atol=1e-6,
rtol=1e-6,
- err_msg=str(dict(pscale=pscale, plist=plist, variable=variable)),
+ err_msg=str(
+ dict(pscale=pscale, plist=plist, variable=variable)
+ ),
)
@@ -349,7 +360,10 @@ def test_equilibration_methods_with_adjoints(preeq_fixture):
amici.SteadyStateSensitivityMode.integrationOnly,
amici.SteadyStateSensitivityMode.integrateIfNewtonFails,
]
- sensi_meths = [amici.SensitivityMethod.forward, amici.SensitivityMethod.adjoint]
+ sensi_meths = [
+ amici.SensitivityMethod.forward,
+ amici.SensitivityMethod.adjoint,
+ ]
settings = itertools.product(equil_meths, sensi_meths)
for setting in settings:
@@ -374,7 +388,9 @@ def test_equilibration_methods_with_adjoints(preeq_fixture):
atol=1e-6,
rtol=1e-6,
err_msg=str(
- dict(variable=variable, setting1=setting1, setting2=setting2)
+ dict(
+ variable=variable, setting1=setting1, setting2=setting2
+ )
),
)
@@ -523,11 +539,15 @@ def test_steadystate_computation_mode(preeq_fixture):
assert rdatas[mode]["status"] == amici.AMICI_SUCCESS
assert np.all(
- rdatas[amici.SteadyStateComputationMode.integrationOnly]["preeq_status"][0]
+ rdatas[amici.SteadyStateComputationMode.integrationOnly][
+ "preeq_status"
+ ][0]
== [0, 1, 0]
)
assert (
- rdatas[amici.SteadyStateComputationMode.integrationOnly]["preeq_numsteps"][0][0]
+ rdatas[amici.SteadyStateComputationMode.integrationOnly][
+ "preeq_numsteps"
+ ][0][0]
== 0
)
@@ -536,7 +556,10 @@ def test_steadystate_computation_mode(preeq_fixture):
== [1, 0, 0]
)
assert (
- rdatas[amici.SteadyStateComputationMode.newtonOnly]["preeq_numsteps"][0][0] > 0
+ rdatas[amici.SteadyStateComputationMode.newtonOnly]["preeq_numsteps"][
+ 0
+ ][0]
+ > 0
)
# assert correct results
@@ -563,7 +586,9 @@ def test_simulation_errors(preeq_fixture):
) = preeq_fixture
solver.setSensitivityOrder(amici.SensitivityOrder.first)
- solver.setSensitivityMethodPreequilibration(amici.SensitivityMethod.forward)
+ solver.setSensitivityMethodPreequilibration(
+ amici.SensitivityMethod.forward
+ )
model.setSteadyStateSensitivityMode(
amici.SteadyStateSensitivityMode.integrationOnly
)
@@ -594,10 +619,16 @@ def test_simulation_errors(preeq_fixture):
rdata = amici.runAmiciSimulation(model, solver, e)
assert rdata["status"] != amici.AMICI_SUCCESS
assert rdata._swigptr.messages[0].severity == amici.LogSeverity_debug
- assert rdata._swigptr.messages[0].identifier == "CVODES:CVode:RHSFUNC_FAIL"
+ assert (
+ rdata._swigptr.messages[0].identifier
+ == "CVODES:CVode:RHSFUNC_FAIL"
+ )
assert rdata._swigptr.messages[1].severity == amici.LogSeverity_debug
assert rdata._swigptr.messages[1].identifier == "EQUILIBRATION_FAILURE"
- assert "exceedingly long simulation time" in rdata._swigptr.messages[1].message
+ assert (
+ "exceedingly long simulation time"
+ in rdata._swigptr.messages[1].message
+ )
assert rdata._swigptr.messages[2].severity == amici.LogSeverity_error
assert rdata._swigptr.messages[2].identifier == "OTHER"
assert rdata._swigptr.messages[3].severity == amici.LogSeverity_debug
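
The `preeq_status` assertions a few hunks up read three per-stage flags (first Newton run, simulation, second Newton run); a hedged helper making that interpretation explicit:

```python
def preeq_used_simulation_only(rdata) -> bool:
    """Sketch: True if the pre-equilibration steady state came from
    integration rather than Newton's method, per the flag layout asserted
    in test_steadystate_computation_mode above."""
    newton1, simulation, newton2 = rdata["preeq_status"][0]
    return simulation == 1 and newton1 == 0 and newton2 == 0
```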
diff --git a/python/tests/test_pregenerated_models.py b/python/tests/test_pregenerated_models.py
index bd4bb7e53b..5a110cdfc2 100755
--- a/python/tests/test_pregenerated_models.py
+++ b/python/tests/test_pregenerated_models.py
@@ -97,12 +97,17 @@ def test_pregenerated_model(sub_test, case):
verify_simulation_opts["atol"] = 1e-5
verify_simulation_opts["rtol"] = 1e-2
- if model_name.startswith("model_robertson") and case == "sensiforwardSPBCG":
+ if (
+ model_name.startswith("model_robertson")
+ and case == "sensiforwardSPBCG"
+ ):
verify_simulation_opts["atol"] = 1e-3
verify_simulation_opts["rtol"] = 1e-3
verify_simulation_results(
- rdata, expected_results[sub_test][case]["results"], **verify_simulation_opts
+ rdata,
+ expected_results[sub_test][case]["results"],
+ **verify_simulation_opts,
)
if model_name == "model_steadystate" and case == "sensiforwarderrorint":
@@ -113,7 +118,9 @@ def test_pregenerated_model(sub_test, case):
if (
edata
and model_name != "model_neuron_o2"
- and not (model_name == "model_robertson" and case == "sensiforwardSPBCG")
+ and not (
+ model_name == "model_robertson" and case == "sensiforwardSPBCG"
+ )
):
if isinstance(edata, amici.amici.ExpData):
edatas = [edata, edata]
@@ -222,14 +229,18 @@ def verify_simulation_results(
subfields = expected_results["diagnosis"].keys()
else:
- attrs = [field for field in fields if field in expected_results.attrs.keys()]
+ attrs = [
+ field for field in fields if field in expected_results.attrs.keys()
+ ]
if "diagnosis" in expected_results.keys():
subfields = [
field
for field in fields
if field in expected_results["diagnosis"].keys()
]
- fields = [field for field in fields if field in expected_results.keys()]
+ fields = [
+ field for field in fields if field in expected_results.keys()
+ ]
if expected_results.attrs["status"][0] != 0:
assert rdata["status"] == expected_results.attrs["status"][0]
@@ -254,12 +265,22 @@ def verify_simulation_results(
continue
if field == "s2llh":
_check_results(
- rdata, field, expected_results[field][()], atol=1e-4, rtol=1e-3
+ rdata,
+ field,
+ expected_results[field][()],
+ atol=1e-4,
+ rtol=1e-3,
)
else:
_check_results(
- rdata, field, expected_results[field][()], atol=atol, rtol=rtol
+ rdata,
+ field,
+ expected_results[field][()],
+ atol=atol,
+ rtol=rtol,
)
for attr in attrs:
- _check_results(rdata, attr, expected_results.attrs[attr], atol=atol, rtol=rtol)
+ _check_results(
+ rdata, attr, expected_results.attrs[attr], atol=atol, rtol=rtol
+ )
diff --git a/python/tests/test_pysb.py b/python/tests/test_pysb.py
index 5673667e3f..52ca3a320f 100644
--- a/python/tests/test_pysb.py
+++ b/python/tests/test_pysb.py
@@ -141,7 +141,9 @@ def test_compare_to_pysb_simulation(example):
with amici.add_path(os.path.dirname(pysb.examples.__file__)):
with amici.add_path(
- os.path.join(os.path.dirname(__file__), "..", "tests", "pysb_test_models")
+ os.path.join(
+ os.path.dirname(__file__), "..", "tests", "pysb_test_models"
+ )
):
# load example
pysb.SelfExporter.cleanup() # reset pysb
@@ -185,7 +187,9 @@ def test_compare_to_pysb_simulation(example):
observables=list(pysb_model.observables.keys()),
)
- amici_model_module = amici.import_model_module(pysb_model.name, outdir)
+ amici_model_module = amici.import_model_module(
+ pysb_model.name, outdir
+ )
model_pysb = amici_model_module.getModel()
model_pysb.setTimepoints(tspan)
@@ -196,7 +200,9 @@ def test_compare_to_pysb_simulation(example):
rdata = amici.runAmiciSimulation(model_pysb, solver)
# check agreement of species simulations
- assert np.isclose(rdata["x"], pysb_simres.species, 1e-4, 1e-4).all()
+ assert np.isclose(
+ rdata["x"], pysb_simres.species, 1e-4, 1e-4
+ ).all()
if example not in [
"fricker_2010_apoptosis",
@@ -325,7 +331,8 @@ def test_heavyside_and_special_symbols():
"deg",
a() >> None,
pysb.Expression(
- "rate", sp.Piecewise((1, pysb.Observable("a", a()) < 1), (0.0, True))
+ "rate",
+ sp.Piecewise((1, pysb.Observable("a", a()) < 1), (0.0, True)),
),
)
@@ -374,4 +381,6 @@ def test_energy():
solver.setRelativeTolerance(1e-14)
solver.setAbsoluteTolerance(1e-14)
- check_derivatives(amici_model, solver, epsilon=1e-4, rtol=1e-2, atol=1e-2)
+ check_derivatives(
+ amici_model, solver, epsilon=1e-4, rtol=1e-2, atol=1e-2
+ )
diff --git a/python/tests/test_rdata.py b/python/tests/test_rdata.py
index cbfc6dc7a9..ac7659f363 100644
--- a/python/tests/test_rdata.py
+++ b/python/tests/test_rdata.py
@@ -2,7 +2,8 @@
import amici
import numpy as np
import pytest
-from numpy.testing import assert_array_equal
+from amici.numpy import evaluate
+from numpy.testing import assert_almost_equal, assert_array_equal
@pytest.fixture(scope="session")
@@ -23,7 +24,9 @@ def test_rdata_by_id(rdata_by_id_fixture):
assert_array_equal(rdata.by_id(model.getStateIds()[1]), rdata.x[:, 1])
assert_array_equal(rdata.by_id(model.getStateIds()[1], "x"), rdata.x[:, 1])
- assert_array_equal(rdata.by_id(model.getStateIds()[1], "x", model), rdata.x[:, 1])
+ assert_array_equal(
+ rdata.by_id(model.getStateIds()[1], "x", model), rdata.x[:, 1]
+ )
assert_array_equal(
rdata.by_id(model.getObservableIds()[0], "y", model), rdata.y[:, 0]
@@ -37,3 +40,27 @@ def test_rdata_by_id(rdata_by_id_fixture):
assert_array_equal(
rdata.by_id(model.getStateIds()[1], "sx", model), rdata.sx[:, :, 1]
)
+
+
+def test_evaluate(rdata_by_id_fixture):
+ # get IDs of model components
+ model, rdata = rdata_by_id_fixture
+ expr0_id = model.getExpressionIds()[0]
+ state1_id = model.getStateIds()[1]
+ observable0_id = model.getObservableIds()[0]
+
+ # ensure `evaluate` works for atoms
+ expr0 = rdata.by_id(expr0_id)
+ assert_array_equal(expr0, evaluate(expr0_id, rdata=rdata))
+
+ state1 = rdata.by_id(state1_id)
+ assert_array_equal(state1, evaluate(state1_id, rdata=rdata))
+
+ observable0 = rdata.by_id(observable0_id)
+ assert_array_equal(observable0, evaluate(observable0_id, rdata=rdata))
+
+ # ensure `evaluate` works for expressions
+ assert_almost_equal(
+ expr0 + state1 * observable0,
+ evaluate(f"{expr0_id} + {state1_id} * {observable0_id}", rdata=rdata),
+ )
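
The new `test_evaluate` shows that `amici.numpy.evaluate` resolves single entity IDs as well as expressions combining them; a hedged helper built on the same call (the IDs are hypothetical placeholders):

```python
from amici.numpy import evaluate

def fold_change(rdata, observable_id: str, state_id: str):
    # any IDs resolvable via rdata.by_id can appear in the expression
    return evaluate(f"{observable_id} / {state_id}", rdata=rdata)
```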
diff --git a/python/tests/test_sbml_import.py b/python/tests/test_sbml_import.py
index 41ccdd925c..7c4a67c0a2 100644
--- a/python/tests/test_sbml_import.py
+++ b/python/tests/test_sbml_import.py
@@ -73,7 +73,10 @@ def test_sbml2amici_nested_observables_fail(simple_sbml_model):
sbml_importer.sbml2amici(
model_name=model_name,
output_dir=tmpdir,
- observables={"outer": {"formula": "inner"}, "inner": {"formula": "S1"}},
+ observables={
+ "outer": {"formula": "inner"},
+ "inner": {"formula": "S1"},
+ },
compute_conservation_laws=False,
generate_sensitivity_code=False,
compile=False,
@@ -135,11 +138,15 @@ def observable_dependent_error_model(simple_sbml_model):
"observable_s1_scaled": "0.02 * observable_s1_scaled",
},
)
- yield amici.import_model_module(module_name=model_name, module_path=tmpdir)
+ yield amici.import_model_module(
+ module_name=model_name, module_path=tmpdir
+ )
@skip_on_valgrind
-def test_sbml2amici_observable_dependent_error(observable_dependent_error_model):
+def test_sbml2amici_observable_dependent_error(
+ observable_dependent_error_model,
+):
"""Check gradients for model with observable-dependent error"""
model_module = observable_dependent_error_model
model = model_module.getModel()
@@ -149,9 +156,14 @@ def test_sbml2amici_observable_dependent_error(observable_dependent_error_model)
# generate artificial data
rdata = amici.runAmiciSimulation(model, solver)
assert_allclose(
- rdata.sigmay[:, 0], 0.1 + 0.05 * rdata.y[:, 0], rtol=1.0e-5, atol=1.0e-8
+ rdata.sigmay[:, 0],
+ 0.1 + 0.05 * rdata.y[:, 0],
+ rtol=1.0e-5,
+ atol=1.0e-8,
+ )
+ assert_allclose(
+ rdata.sigmay[:, 1], 0.02 * rdata.y[:, 1], rtol=1.0e-5, atol=1.0e-8
)
- assert_allclose(rdata.sigmay[:, 1], 0.02 * rdata.y[:, 1], rtol=1.0e-5, atol=1.0e-8)
edata = amici.ExpData(rdata, 1.0, 0.0)
edata.setObservedDataStdDev(np.nan)
@@ -196,7 +208,9 @@ def model_steadystate_module():
observables = amici.assignmentRules2observables(
sbml_importer.sbml,
- filter_function=lambda variable: variable.getId().startswith("observable_")
+ filter_function=lambda variable: variable.getId().startswith(
+ "observable_"
+ )
and not variable.getId().endswith("_sigma"),
)
@@ -210,7 +224,9 @@ def model_steadystate_module():
sigmas={"observable_x1withsigma": "observable_x1withsigma_sigma"},
)
- yield amici.import_model_module(module_name=module_name, module_path=outdir)
+ yield amici.import_model_module(
+ module_name=module_name, module_path=outdir
+ )
@pytest.fixture(scope="session")
@@ -223,7 +239,9 @@ def model_units_module():
with TemporaryDirectory() as outdir:
sbml_importer.sbml2amici(model_name=module_name, output_dir=outdir)
- yield amici.import_model_module(module_name=module_name, module_path=outdir)
+ yield amici.import_model_module(
+ module_name=module_name, module_path=outdir
+ )
def test_presimulation(sbml_example_presimulation_module):
@@ -323,7 +341,9 @@ def test_steadystate_simulation(model_steadystate_module):
solver.setRelativeTolerance(1e-12)
solver.setAbsoluteTolerance(1e-12)
- check_derivatives(model, solver, edata[0], atol=1e-3, rtol=1e-3, epsilon=1e-4)
+ check_derivatives(
+ model, solver, edata[0], atol=1e-3, rtol=1e-3, epsilon=1e-4
+ )
# Run some additional tests which need a working Model,
# but don't need precomputed expectations.
@@ -406,7 +426,9 @@ def model_test_likelihoods():
noise_distributions=noise_distributions,
)
- yield amici.import_model_module(module_name=module_name, module_path=outdir)
+ yield amici.import_model_module(
+ module_name=module_name, module_path=outdir
+ )
@skip_on_valgrind
@@ -512,7 +534,9 @@ def test_units(model_units_module):
@skip_on_valgrind
-@pytest.mark.skipif(os.name == "nt", reason="Avoid `CERTIFICATE_VERIFY_FAILED` error")
+@pytest.mark.skipif(
+ os.name == "nt", reason="Avoid `CERTIFICATE_VERIFY_FAILED` error"
+)
def test_sympy_exp_monkeypatch():
"""
This model contains a removable discontinuity at t=0 that requires
@@ -557,7 +581,9 @@ def test_sympy_exp_monkeypatch():
# print sensitivity-related results
assert rdata["status"] == amici.AMICI_SUCCESS
- check_derivatives(model, solver, None, atol=1e-2, rtol=1e-2, epsilon=1e-3)
+ check_derivatives(
+ model, solver, None, atol=1e-2, rtol=1e-2, epsilon=1e-3
+ )
def normal_nllh(m, y, sigma):
@@ -594,7 +620,8 @@ def log_laplace_nllh(m, y, sigma):
def log10_laplace_nllh(m, y, sigma):
return sum(
- np.log(2 * sigma * m * np.log(10)) + np.abs(np.log10(y) - np.log10(m)) / sigma
+ np.log(2 * sigma * m * np.log(10))
+ + np.abs(np.log10(y) - np.log10(m)) / sigma
)
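
For reference, `log10_laplace_nllh` is the exact negative log-density of a measurement $m$ whose $\log_{10}$ is Laplace-distributed around $\log_{10} y$ with scale $\sigma$; the $m \ln 10$ factor comes from the change of variables back to linear scale:

$$
-\log p(m \mid y, \sigma) = \sum \left[ \log\bigl(2 \sigma m \ln 10\bigr) + \frac{\lvert \log_{10} y - \log_{10} m \rvert}{\sigma} \right]
$$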
diff --git a/python/tests/test_sbml_import_special_functions.py b/python/tests/test_sbml_import_special_functions.py
index 1aa806156d..9d8f447511 100644
--- a/python/tests/test_sbml_import_special_functions.py
+++ b/python/tests/test_sbml_import_special_functions.py
@@ -51,7 +51,9 @@ def model_special_likelihoods():
noise_distributions=noise_distributions,
)
- yield amici.import_model_module(module_name=module_name, module_path=outdir)
+ yield amici.import_model_module(
+ module_name=module_name, module_path=outdir
+ )
@skip_on_valgrind
@@ -111,7 +113,12 @@ def test_special_likelihoods(model_special_likelihoods):
solver.setSensitivityMethod(sensi_method)
solver.setSensitivityOrder(amici.SensitivityOrder.first)
check_derivatives(
- model, solver, edata, atol=1e-4, rtol=1e-3, check_least_squares=False
+ model,
+ solver,
+ edata,
+ atol=1e-4,
+ rtol=1e-3,
+ check_least_squares=False,
)
# Test for m > y, i.e. in region with 0 density
@@ -205,9 +212,13 @@ def test_rateof():
assert_approx_equal(rdata["xdot"][i_S1], rdata["xdot"][i_p2])
assert_array_almost_equal_nulp(rdata.by_id("S3"), t, 10)
- assert_array_almost_equal_nulp(rdata.by_id("S2"), 2 * rdata.by_id("S3"))
+ assert_array_almost_equal_nulp(
+ rdata.by_id("S2"), 2 * rdata.by_id("S3")
+ )
assert_array_almost_equal_nulp(
rdata.by_id("S4")[1:], 0.5 * np.diff(rdata.by_id("S3")), 10
)
assert_array_almost_equal_nulp(rdata.by_id("p3"), 0)
- assert_array_almost_equal_nulp(rdata.by_id("p2"), 1 + rdata.by_id("S1"))
+ assert_array_almost_equal_nulp(
+ rdata.by_id("p2"), 1 + rdata.by_id("S1")
+ )
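
`assert_array_almost_equal_nulp` measures closeness in units in the last place: it passes when `|x - y|` does not exceed `nulp` times the floating-point spacing at the larger magnitude. A self-contained illustration:

```python
import numpy as np
from numpy.testing import assert_array_almost_equal_nulp

x = np.array([1.0, 2.0, 4.0])
# passes: the arrays differ by exactly one unit in the last place
assert_array_almost_equal_nulp(x, x + np.spacing(x), nulp=1)
```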
diff --git a/python/tests/test_splines.py b/python/tests/test_splines.py
index 2385631ab5..a7fe01e84a 100644
--- a/python/tests/test_splines.py
+++ b/python/tests/test_splines.py
@@ -61,7 +61,9 @@ def test_multiple_splines(**kwargs):
tols5 = (tols5, tols5, tols5)
tols = []
- for t0, t1, t2, t3, t4, t5 in zip(tols0, tols1, tols2, tols3, tols4, tols5):
+ for t0, t1, t2, t3, t4, t5 in zip(
+ tols0, tols1, tols2, tols3, tols4, tols5
+ ):
keys = set().union(
t0.keys(), t1.keys(), t2.keys(), t3.keys(), t4.keys(), t5.keys()
)
@@ -98,7 +100,8 @@ def test_multiple_splines(**kwargs):
# groundtruth = test_multiple_splines(return_groundtruth=True)
# They should be recomputed only if the splines used in the test change
precomputed_path = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), "test_splines_precomputed.npz"
+ os.path.dirname(os.path.abspath(__file__)),
+ "test_splines_precomputed.npz",
)
kwargs["groundtruth"] = dict(np.load(precomputed_path))
diff --git a/python/tests/test_splines_python.py b/python/tests/test_splines_python.py
index 905ae8b83d..4c4de5ccfc 100644
--- a/python/tests/test_splines_python.py
+++ b/python/tests/test_splines_python.py
@@ -217,7 +217,9 @@ def check_gradient(spline, t, params, params_values, expected, rel_tol=1e-9):
value = spline.evaluate(t)
subs = {pname: pvalue for (pname, pvalue) in zip(params, params_values)}
for p, exp in zip(params, expected):
- assert math.isclose(float(value.diff(p).subs(subs)), exp, rel_tol=rel_tol)
+ assert math.isclose(
+ float(value.diff(p).subs(subs)), exp, rel_tol=rel_tol
+ )
@skip_on_valgrind
@@ -232,13 +234,25 @@ def test_SplineUniformSensitivity():
)
check_gradient(spline, 0.00, params, params_values, [3.0, 1.0, 0.0])
check_gradient(
- spline, 0.25, params, params_values, [0.539062, 0.179688, 4.45312], rel_tol=1e-5
+ spline,
+ 0.25,
+ params,
+ params_values,
+ [0.539062, 0.179688, 4.45312],
+ rel_tol=1e-5,
)
check_gradient(spline, 1.0 / 3, params, params_values, [0.0, 0.0, 5.0])
- check_gradient(spline, 0.50, params, params_values, [0.1875, -0.125, 2.625])
+ check_gradient(
+ spline, 0.50, params, params_values, [0.1875, -0.125, 2.625]
+ )
check_gradient(spline, 2.0 / 3, params, params_values, [0.0, 0.0, 0.0])
check_gradient(
- spline, 0.75, params, params_values, [-1.07812, 0.179688, 0.1875], rel_tol=1e-5
+ spline,
+ 0.75,
+ params,
+ params_values,
+ [-1.07812, 0.179688, 0.1875],
+ rel_tol=1e-5,
)
check_gradient(spline, 1.00, params, params_values, [-6.0, 1.0, 3.0])
@@ -255,12 +269,19 @@ def test_SplineNonUniformSensitivity():
)
check_gradient(spline, 0.00, params, params_values, [3.0, 1.0, 0.0])
check_gradient(
- spline, 0.05, params, params_values, [1.3125, 0.4375, 2.89062], rel_tol=1e-5
+ spline,
+ 0.05,
+ params,
+ params_values,
+ [1.3125, 0.4375, 2.89062],
+ rel_tol=1e-5,
)
check_gradient(spline, 0.10, params, params_values, [0.0, 0.0, 5.0])
check_gradient(spline, 0.30, params, params_values, [-0.45, -0.3, 3.6])
check_gradient(spline, 0.50, params, params_values, [0.0, 0.0, 0.0])
- check_gradient(spline, 0.75, params, params_values, [-2.625, 0.4375, 0.921875])
+ check_gradient(
+ spline, 0.75, params, params_values, [-2.625, 0.4375, 0.921875]
+ )
check_gradient(spline, 1.00, params, params_values, [-6.0, 1.0, 3.0])
@@ -282,15 +303,30 @@ def test_SplineExplicitSensitivity():
)
check_gradient(spline, 0.00, params, params_values, [3.0, 1.0, 0.0])
check_gradient(
- spline, 0.25, params, params_values, [0.46875, 0.109375, 4.37109], rel_tol=1e-6
+ spline,
+ 0.25,
+ params,
+ params_values,
+ [0.46875, 0.109375, 4.37109],
+ rel_tol=1e-6,
)
check_gradient(spline, 1.0 / 3, params, params_values, [0.0, 0.0, 5.0])
check_gradient(
- spline, 0.50, params, params_values, [-0.166667, 0.0641793, 2.625], rel_tol=1e-5
+ spline,
+ 0.50,
+ params,
+ params_values,
+ [-0.166667, 0.0641793, 2.625],
+ rel_tol=1e-5,
)
check_gradient(spline, 2.0 / 3, params, params_values, [0.0, 0.0, 0.0])
check_gradient(
- spline, 0.75, params, params_values, [-0.75, 0.130923, 0.46875], rel_tol=1e-5
+ spline,
+ 0.75,
+ params,
+ params_values,
+ [-0.75, 0.130923, 0.46875],
+ rel_tol=1e-5,
)
check_gradient(spline, 1.00, params, params_values, [-6.0, 1.0, 3.0])
@@ -308,7 +344,12 @@ def test_SplineLogarithmicSensitivity():
)
check_gradient(spline, 0.00, params, params_values, [3.0, 1.0, 0.0])
check_gradient(
- spline, 0.25, params, params_values, [0.585881, 0.195294, 4.38532], rel_tol=1e-5
+ spline,
+ 0.25,
+ params,
+ params_values,
+ [0.585881, 0.195294, 4.38532],
+ rel_tol=1e-5,
)
check_gradient(spline, 1.0 / 3, params, params_values, [0.0, 0.0, 5.0])
check_gradient(
diff --git a/python/tests/test_splines_short.py b/python/tests/test_splines_short.py
index 37df5f5db9..59e54a3279 100644
--- a/python/tests/test_splines_short.py
+++ b/python/tests/test_splines_short.py
@@ -99,7 +99,9 @@ def test_splines_plist():
# Real spline #3
xx = UniformGrid(0, 5, number_of_nodes=6)
p1, p2, p3, p4, p5 = sp.symbols("p1 p2 p3 p4 p5")
- yy = np.asarray([p1 + p2, p2 * p3, p4, sp.cos(p1 + p3), p4 * sp.log(p1), p3])
+ yy = np.asarray(
+ [p1 + p2, p2 * p3, p4, sp.cos(p1 + p3), p4 * sp.log(p1), p3]
+ )
dd = np.asarray([-0.75, -0.875, p5, 0.125, 1.15057181, 0.0])
params = {p1: 1.0, p2: 0.5, p3: 1.5, p4: -0.25, p5: -0.5}
# print([y.subs(params).evalf() for y in yy])
diff --git a/python/tests/test_swig_interface.py b/python/tests/test_swig_interface.py
index 09cc4c78af..a746552b55 100644
--- a/python/tests/test_swig_interface.py
+++ b/python/tests/test_swig_interface.py
@@ -7,6 +7,7 @@
import numbers
import amici
+import numpy as np
def test_version_number(pysb_example_presimulation_module):
@@ -49,7 +50,9 @@ def test_copy_constructors(pysb_example_presimulation_module):
obj_clone = obj.clone()
- assert get_val(obj, attr) == get_val(obj_clone, attr), f"{obj} - {attr}"
+ assert get_val(obj, attr) == get_val(
+ obj_clone, attr
+ ), f"{obj} - {attr}"
# `None` values are skipped in `test_model_instance_settings`.
@@ -165,16 +168,22 @@ def test_model_instance_settings(pysb_example_presimulation_module):
# The new model has the default settings.
model_default_settings = amici.get_model_settings(model)
for name in model_instance_settings:
- if (name == "InitialStates" and not model.hasCustomInitialStates()) or (
+ if (
+ name == "InitialStates" and not model.hasCustomInitialStates()
+ ) or (
name
- == ("getInitialStateSensitivities", "setUnscaledInitialStateSensitivities")
+ == (
+ "getInitialStateSensitivities",
+ "setUnscaledInitialStateSensitivities",
+ )
and not model.hasCustomInitialStateSensitivities()
):
# Here the expected value differs from what the getter would return
assert model_default_settings[name] == []
else:
assert (
- model_default_settings[name] == model_instance_settings[name][i_default]
+ model_default_settings[name]
+ == model_instance_settings[name][i_default]
), name
# The grouped setter method works.
@@ -221,7 +230,9 @@ def test_interdependent_settings(pysb_example_presimulation_module):
# Some values need to be transformed to be tested in Python
# (e.g. SWIG objects). Default transformer is no transformation
# (the identity function).
- getter_transformers = {setting: (lambda x: x) for setting in original_settings}
+ getter_transformers = {
+ setting: (lambda x: x) for setting in original_settings
+ }
getter_transformers.update(
{
# Convert from SWIG object.
@@ -311,7 +322,9 @@ def test_unhandled_settings(pysb_example_presimulation_module):
name
for names in model_instance_settings
for name in (
- names if isinstance(names, tuple) else (f"get{names}", f"set{names}")
+ names
+ if isinstance(names, tuple)
+ else (f"get{names}", f"set{names}")
)
]
@@ -375,7 +388,9 @@ def test_model_instance_settings_custom_x0(pysb_example_presimulation_module):
assert not model.hasCustomInitialStateSensitivities()
settings = amici.get_model_settings(model)
model.setInitialStates(model.getInitialStates())
- model.setUnscaledInitialStateSensitivities(model.getInitialStateSensitivities())
+ model.setUnscaledInitialStateSensitivities(
+ model.getInitialStateSensitivities()
+ )
amici.set_model_settings(model, settings)
assert not model.hasCustomInitialStates()
assert not model.hasCustomInitialStateSensitivities()
@@ -428,3 +443,30 @@ def test_edata_repr():
assert expected_str in repr(e)
# avoid double delete!!
edata_ptr.release()
+
+
+def test_edata_equality_operator():
+ e1 = amici.ExpData(1, 2, 3, [3])
+ e2 = amici.ExpData(1, 2, 3, [3])
+ assert e1 == e2
+ # check that comparison with other types works
+ # this is not implemented by swig by default
+ assert e1 != 1
+
+
+def test_expdata_and_expdataview_are_deepcopyable():
+ edata1 = amici.ExpData(3, 2, 3, range(4))
+ edata1.setObservedData(np.zeros((3, 4)).flatten())
+
+ # ExpData
+ edata2 = copy.deepcopy(edata1)
+ assert edata1 == edata2
+ assert edata1.this != edata2.this
+ edata2.setTimepoints([0])
+ assert edata1 != edata2
+
+ # ExpDataView
+ ev1 = amici.ExpDataView(edata1)
+ ev2 = copy.deepcopy(ev1)
+ assert ev2._swigptr.this != ev1._swigptr.this
+ assert ev1 == ev2
diff --git a/python/tests/util.py b/python/tests/util.py
index 14f514c997..dde10eb454 100644
--- a/python/tests/util.py
+++ b/python/tests/util.py
@@ -31,7 +31,9 @@ def create_amici_model(sbml_model, model_name, **kwargs) -> AmiciModel:
else tempfile.mkdtemp()
)
- sbml_importer.sbml2amici(model_name=model_name, output_dir=output_dir, **kwargs)
+ sbml_importer.sbml2amici(
+ model_name=model_name, output_dir=output_dir, **kwargs
+ )
model_module = import_model_module(model_name, output_dir)
return model_module.getModel()
@@ -111,7 +113,9 @@ def create_event_assignment(target, assignment):
create_event_assignment(event_target, event_assignment)
else:
- create_event_assignment(event_def["target"], event_def["assignment"])
+ create_event_assignment(
+ event_def["target"], event_def["assignment"]
+ )
if to_file:
libsbml.writeSBMLToFile(document, to_file)
@@ -133,7 +137,9 @@ def check_trajectories_without_sensitivities(
solver.setAbsoluteTolerance(1e-15)
solver.setRelativeTolerance(1e-12)
rdata = runAmiciSimulation(amici_model, solver=solver)
- _check_close(rdata["x"], result_expected_x, field="x", rtol=5e-9, atol=1e-13)
+ _check_close(
+ rdata["x"], result_expected_x, field="x", rtol=5e-9, atol=1e-13
+ )
def check_trajectories_with_forward_sensitivities(
@@ -153,5 +159,9 @@ def check_trajectories_with_forward_sensitivities(
solver.setAbsoluteToleranceFSA(1e-15)
solver.setRelativeToleranceFSA(1e-13)
rdata = runAmiciSimulation(amici_model, solver=solver)
- _check_close(rdata["x"], result_expected_x, field="x", rtol=1e-10, atol=1e-12)
- _check_close(rdata["sx"], result_expected_sx, field="sx", rtol=1e-7, atol=1e-9)
+ _check_close(
+ rdata["x"], result_expected_x, field="x", rtol=1e-10, atol=1e-12
+ )
+ _check_close(
+ rdata["sx"], result_expected_sx, field="sx", rtol=1e-7, atol=1e-9
+ )
diff --git a/scripts/downloadAndBuildDoxygen.sh b/scripts/downloadAndBuildDoxygen.sh
index c51c05c599..19d86be5a1 100755
--- a/scripts/downloadAndBuildDoxygen.sh
+++ b/scripts/downloadAndBuildDoxygen.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Download and build Doxygen (in case apt or homebrew version is buggy again)
-set -e
+set -euo pipefail
SCRIPT_PATH=$(dirname "$BASH_SOURCE")
AMICI_PATH=$(cd "$SCRIPT_PATH"/.. && pwd)
@@ -8,8 +8,11 @@ AMICI_PATH=$(cd "$SCRIPT_PATH"/.. && pwd)
DOXYGEN_DIR="${AMICI_PATH}"/ThirdParty/doxygen
cd "${AMICI_PATH}"/ThirdParty
if [[ ! -d ${DOXYGEN_DIR} ]]; then
- # git clone --depth 1 https://github.com/doxygen/doxygen.git "${DOXYGEN_DIR}"
- git clone --single-branch --branch Release_1_9_7 --depth 1 https://github.com/doxygen/doxygen.git "${DOXYGEN_DIR}"
+ git clone --single-branch \
+ --branch Release_1_9_7 \
+ --depth 1 \
+ -c advice.detachedHead=false \
+ https://github.com/doxygen/doxygen.git "${DOXYGEN_DIR}"
fi
cd "${DOXYGEN_DIR}"
diff --git a/scripts/downloadAndBuildSwig.sh b/scripts/downloadAndBuildSwig.sh
index b7d7f9d865..5fa0896f6c 100755
--- a/scripts/downloadAndBuildSwig.sh
+++ b/scripts/downloadAndBuildSwig.sh
@@ -1,11 +1,13 @@
#!/usr/bin/env bash
# Download and build SWIG
-set -e
+#
+# Usage: downloadAndBuildSwig.sh [swig_version]
+set -euo pipefail
SCRIPT_PATH=$(dirname "$BASH_SOURCE")
AMICI_PATH=$(cd "$SCRIPT_PATH/.." && pwd)
-swig_version=4.1.1
+swig_version="${1:-"4.1.1"}"
SWIG_ARCHIVE="swig-${swig_version}.tar.gz"
SWIG_URL="http://downloads.sourceforge.net/project/swig/swig/swig-${swig_version}/${SWIG_ARCHIVE}"
SWIG_DIR="swig-${swig_version}"
diff --git a/scripts/installAmiciSource.sh b/scripts/installAmiciSource.sh
index aa330bef22..4e693468b7 100755
--- a/scripts/installAmiciSource.sh
+++ b/scripts/installAmiciSource.sh
@@ -27,10 +27,9 @@ else
source ${AMICI_PATH}/build/venv/bin/activate
fi
-pip install -U "setuptools<64"
pip install --upgrade pip wheel
pip install --upgrade pip scipy matplotlib coverage pytest \
pytest-cov cmake_build_extension numpy
pip install git+https://github.com/FFroehlich/pysb@fix_pattern_matching # pin to PR for SPM with compartments
-pip install --verbose -e ${AMICI_PATH}/python/sdist[petab,test,vis] --no-build-isolation
+AMICI_BUILD_TEMP="${AMICI_PATH}/python/sdist/build/temp" pip install --verbose -e ${AMICI_PATH}/python/sdist[petab,test,vis] --no-build-isolation
deactivate
diff --git a/src/CMakeLists.template.cmake b/src/CMakeLists.template.cmake
index f015a78638..43df61ff61 100644
--- a/src/CMakeLists.template.cmake
+++ b/src/CMakeLists.template.cmake
@@ -2,6 +2,11 @@
cmake_minimum_required(VERSION 3.15)
cmake_policy(VERSION 3.15...3.27)
+# cmake >=3.27
+if(POLICY CMP0144)
+ cmake_policy(SET CMP0144 NEW)
+endif(POLICY CMP0144)
+
project(TPL_MODELNAME)
set(CMAKE_CXX_STANDARD 17)
diff --git a/src/steadystateproblem.cpp b/src/steadystateproblem.cpp
index 816d0a4c53..c655b9b386 100644
--- a/src/steadystateproblem.cpp
+++ b/src/steadystateproblem.cpp
@@ -60,9 +60,9 @@ SteadystateProblem::SteadystateProblem(Solver const& solver, Model const& model)
"sensitivities during simulation");
if (solver.getSensitivityMethod() == SensitivityMethod::forward
&& model.getSteadyStateComputationMode()
- == SteadyStateComputationMode::newtonOnly
+ == SteadyStateComputationMode::newtonOnly
&& model.getSteadyStateSensitivityMode()
- == SteadyStateSensitivityMode::integrationOnly)
+ == SteadyStateSensitivityMode::integrationOnly)
throw AmiException("For forward sensitivity analysis steady-state "
"computation mode 'newtonOnly' and steady-state "
"sensitivity mode 'integrationOnly' are not "
@@ -152,7 +152,10 @@ void SteadystateProblem::findSteadyState(
/* Nothing worked, throw an as informative error as possible */
if (!checkSteadyStateSuccess())
- handleSteadyStateFailure();
+ handleSteadyStateFailure(
+ !turnOffNewton, !turnOffSimulation,
+ !turnOffNewton && !turnOffSimulation
+ );
}
void SteadystateProblem::findSteadyStateByNewtonsMethod(
@@ -394,16 +397,23 @@ void SteadystateProblem::getQuadratureBySimulation(
}
}
-[[noreturn]] void SteadystateProblem::handleSteadyStateFailure() {
+[[noreturn]] void SteadystateProblem::handleSteadyStateFailure(
+ bool tried_newton_1, bool tried_simulation, bool tried_newton_2
+) {
/* Throw error message according to error codes */
- std::string errorString = "Steady state computation failed. "
- "First run of Newton solver failed";
- writeErrorString(&errorString, steady_state_status_[0]);
- errorString.append(" Simulation to steady state failed");
- writeErrorString(&errorString, steady_state_status_[1]);
- errorString.append(" Second run of Newton solver failed");
- writeErrorString(&errorString, steady_state_status_[2]);
-
+ std::string errorString = "Steady state computation failed.";
+ if (tried_newton_1) {
+ errorString.append(" First run of Newton solver failed");
+ writeErrorString(&errorString, steady_state_status_[0]);
+ }
+ if (tried_simulation) {
+ errorString.append(" Simulation to steady state failed");
+ writeErrorString(&errorString, steady_state_status_[1]);
+ }
+ if (tried_newton_2) {
+ errorString.append(" Second run of Newton solver failed");
+ writeErrorString(&errorString, steady_state_status_[2]);
+ }
throw AmiException(errorString.c_str());
}
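
A hedged Python mirror of the reworked handler, to make the control flow explicit (names and the status formatting are illustrative, not the C++ API):

```python
def steady_state_error_message(tried, status):
    """tried: booleans for (first Newton run, simulation, second Newton
    run); status: the per-stage status codes. Only stages that were
    actually attempted contribute to the message."""
    parts = ["Steady state computation failed."]
    stages = (
        " First run of Newton solver failed",
        " Simulation to steady state failed",
        " Second run of Newton solver failed",
    )
    for attempted, text, code in zip(tried, stages, status):
        if attempted:
            parts.append(f"{text} (status {code}).")
    return "".join(parts)
```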
diff --git a/swig/edata.i b/swig/edata.i
index 59dcb4fa8a..f2f7d0da8a 100644
--- a/swig/edata.i
+++ b/swig/edata.i
@@ -74,6 +74,14 @@ def _edata_repr(self: "ExpData"):
%pythoncode %{
def __repr__(self):
return _edata_repr(self)
+
+def __eq__(self, other):
+ return other.__class__ == self.__class__ and __eq__(self, other)
+
+def __deepcopy__(self, memo):
+ # invoke copy constructor
+ return type(self)(self)
+
%}
};
%extend std::unique_ptr {
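
The `__deepcopy__` added above simply delegates to the wrapped copy constructor; the same pattern works for any copy-constructible class:

```python
import copy

class Buffer:
    """Toy copy-constructible class illustrating the pattern."""

    def __init__(self, other=None):
        self.data = list(other.data) if other is not None else []

    def __deepcopy__(self, memo):
        return type(self)(self)  # invoke the copy constructor

b1 = Buffer()
b1.data.append(1)
b2 = copy.deepcopy(b1)
assert b2.data == [1] and b2.data is not b1.data
```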
diff --git a/tests/benchmark-models/evaluate_benchmark.py b/tests/benchmark-models/evaluate_benchmark.py
index bcf1f63bb8..0c6e2e4122 100644
--- a/tests/benchmark-models/evaluate_benchmark.py
+++ b/tests/benchmark-models/evaluate_benchmark.py
@@ -29,12 +29,15 @@
ratios = (
pd.concat(
- [df[sensi] / df["t_sim"].values for sensi in ["t_fwd", "t_adj"]] + [df.np],
+ [df[sensi] / df["t_sim"].values for sensi in ["t_fwd", "t_adj"]]
+ + [df.np],
axis=1,
)
.reset_index()
.melt(id_vars=["index", "np"])
- .rename(columns={"index": "model", "variable": "sensitivity", "value": "ratio"})
+ .rename(
+ columns={"index": "model", "variable": "sensitivity", "value": "ratio"}
+ )
)
ratios["sensitivity"] = ratios["sensitivity"].replace(
{"t_fwd": "forward", "t_adj": "adjoint"}
@@ -48,7 +51,14 @@
for ir, row in ratios.iterrows():
if row.sensitivity == "adjoint":
continue
- g.text(ir, row["np"], int(row["np"]), color="black", ha="center", weight="bold")
+ g.text(
+ ir,
+ row["np"],
+ int(row["np"]),
+ color="black",
+ ha="center",
+ weight="bold",
+ )
plt.xticks(rotation=30, horizontalalignment="right")
plt.tight_layout()
diff --git a/tests/benchmark-models/test_petab_benchmark.py b/tests/benchmark-models/test_petab_benchmark.py
index 91bd117a81..0b3c6d80e0 100755
--- a/tests/benchmark-models/test_petab_benchmark.py
+++ b/tests/benchmark-models/test_petab_benchmark.py
@@ -19,7 +19,9 @@
RTOL: float = 1e-2
benchmark_path = (
- Path(__file__).parent.parent.parent / "Benchmark-Models-PEtab" / "Benchmark-Models"
+ Path(__file__).parent.parent.parent
+ / "Benchmark-Models-PEtab"
+ / "Benchmark-Models"
)
# reuse compiled models from test_benchmark_collection.sh
benchmark_outdir = Path(__file__).parent.parent.parent / "test_bmc"
@@ -67,11 +69,15 @@ def test_benchmark_gradient(model, scale):
# only fail on linear scale
pytest.skip()
- petab_problem = petab.Problem.from_yaml(benchmark_path / model / (model + ".yaml"))
+ petab_problem = petab.Problem.from_yaml(
+ benchmark_path / model / (model + ".yaml")
+ )
petab.flatten_timepoint_specific_output_overrides(petab_problem)
# Only compute gradient for estimated parameters.
- parameter_df_free = petab_problem.parameter_df.loc[petab_problem.x_free_ids]
+ parameter_df_free = petab_problem.parameter_df.loc[
+ petab_problem.x_free_ids
+ ]
parameter_ids = list(parameter_df_free.index)
# Setup AMICI objects.
@@ -160,7 +166,11 @@ def test_benchmark_gradient(model, scale):
df = pd.DataFrame(
[
{
- ("fd", r.metadata["size_absolute"], str(r.method_id)): r.value
+ (
+ "fd",
+ r.metadata["size_absolute"],
+ str(r.method_id),
+ ): r.value
for c in d.computers
for r in c.results
}
diff --git a/tests/benchmark-models/test_petab_model.py b/tests/benchmark-models/test_petab_model.py
index 5742d668c6..cf85147535 100755
--- a/tests/benchmark-models/test_petab_model.py
+++ b/tests/benchmark-models/test_petab_model.py
@@ -16,7 +16,12 @@
import petab
import yaml
from amici.logging import get_logger
-from amici.petab_objective import LLH, RDATAS, rdatas_to_measurement_df, simulate_petab
+from amici.petab_objective import (
+ LLH,
+ RDATAS,
+ rdatas_to_measurement_df,
+ simulate_petab,
+)
from petab.visualize import plot_problem
logger = get_logger(f"amici.{__name__}", logging.WARNING)
@@ -86,7 +91,8 @@ def parse_cli_args():
"-o",
"--simulation-file",
dest="simulation_file",
- help="File to write simulation result to, in PEtab" "measurement table format.",
+ help="File to write simulation result to, in PEtab"
+ "measurement table format.",
)
return parser.parse_args()
@@ -162,10 +168,14 @@ def main():
times["np"] = sum(problem.parameter_df[petab.ESTIMATE])
- pd.Series(times).to_csv(f"./tests/benchmark-models/{args.model_name}_benchmark.csv")
+ pd.Series(times).to_csv(
+ f"./tests/benchmark-models/{args.model_name}_benchmark.csv"
+ )
for rdata in rdatas:
- assert rdata.status == amici.AMICI_SUCCESS, f"Simulation failed for {rdata.id}"
+ assert (
+ rdata.status == amici.AMICI_SUCCESS
+ ), f"Simulation failed for {rdata.id}"
# create simulation PEtab table
sim_df = rdatas_to_measurement_df(
@@ -184,7 +194,8 @@ def main():
# save figure
for plot_id, ax in axs.items():
fig_path = os.path.join(
- args.model_directory, f"{args.model_name}_{plot_id}_vis.png"
+ args.model_directory,
+ f"{args.model_name}_{plot_id}_vis.png",
)
logger.info(f"Saving figure to {fig_path}")
ax.get_figure().savefig(fig_path, dpi=150)
@@ -211,7 +222,8 @@ def main():
if np.isclose(llh, ref_llh, rtol=rtol, atol=atol):
logger.info(
- f"Computed llh {llh:.4e} matches reference {ref_llh:.4e}." + tolstr
+ f"Computed llh {llh:.4e} matches reference {ref_llh:.4e}."
+ + tolstr
)
else:
logger.error(
diff --git a/tests/conftest.py b/tests/conftest.py
index 4d2f5521ff..9e90400518 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -48,7 +48,9 @@ def get_all_semantic_case_ids():
suite"""
pattern = re.compile(r"\d{5}")
return sorted(
- str(x.name) for x in SBML_SEMANTIC_CASES_DIR.iterdir() if pattern.match(x.name)
+ str(x.name)
+ for x in SBML_SEMANTIC_CASES_DIR.iterdir()
+ if pattern.match(x.name)
)
@@ -78,7 +80,9 @@ def pytest_generate_tests(metafunc):
def pytest_sessionfinish(session, exitstatus):
"""Process test results"""
global passed_ids
- terminalreporter = session.config.pluginmanager.get_plugin("terminalreporter")
+ terminalreporter = session.config.pluginmanager.get_plugin(
+ "terminalreporter"
+ )
terminalreporter.ensure_newline()
# parse test names to get passed case IDs (don't know any better way to
# access fixture values)
@@ -100,9 +104,14 @@ def write_passed_tags(passed_ids, out=sys.stdout):
passed_component_tags |= cur_component_tags
passed_test_tags |= cur_test_tags
- out.write("\nAt least one test with the following component tags has " "passed:\n")
+ out.write(
+ "\nAt least one test with the following component tags has "
+ "passed:\n"
+ )
out.write(" " + "\n ".join(sorted(passed_component_tags)))
- out.write("\n\nAt least one test with the following test tags has " "passed:\n")
+ out.write(
+ "\n\nAt least one test with the following test tags has " "passed:\n"
+ )
out.write(" " + "\n ".join(sorted(passed_test_tags)))
@@ -132,7 +141,9 @@ def get_tags_for_test(test_id: str) -> Tuple[Set[str], Set[str]]:
test_tags = set()
for line in f:
if line.startswith("testTags:"):
- test_tags = set(re.split(r"[ ,:]", line[len("testTags:") :].strip()))
+ test_tags = set(
+ re.split(r"[ ,:]", line[len("testTags:") :].strip())
+ )
test_tags.discard("")
if line.startswith("componentTags:"):
component_tags = set(
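The testTags parsing reformatted above splits on spaces, commas, and colons, then drops the empty strings that doubled separators leave behind. A minimal sketch with invented tag names:

```python
# Sketch of the tag parsing in get_tags_for_test (tag names are made up).
import re

line = "testTags: Amount, Concentration NonUnityCompartment"
tags = set(re.split(r"[ ,:]", line[len("testTags:"):].strip()))
tags.discard("")  # re.split leaves "" around repeated separators
print(sorted(tags))  # ['Amount', 'Concentration', 'NonUnityCompartment']
```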
diff --git a/tests/cpp/CMakeLists.txt.in b/tests/cpp/CMakeLists.txt.in
index 0704ae5d1e..a93f5955c2 100644
--- a/tests/cpp/CMakeLists.txt.in
+++ b/tests/cpp/CMakeLists.txt.in
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 2.8.12)
+cmake_minimum_required(VERSION 3.15)
project(googletest-download NONE)
diff --git a/tests/generateTestConfig/example.py b/tests/generateTestConfig/example.py
index a0b2891344..963fdf64ce 100644
--- a/tests/generateTestConfig/example.py
+++ b/tests/generateTestConfig/example.py
@@ -18,7 +18,9 @@ def dict2hdf5(object, dictionary):
dtype = "f8"
else:
dtype = " List[int]:
def pytest_addoption(parser):
"""Add pytest CLI options"""
parser.addoption("--petab-cases", help="Test cases to run")
- parser.addoption("--only-pysb", help="Run only PySB tests", action="store_true")
+ parser.addoption(
+ "--only-pysb", help="Run only PySB tests", action="store_true"
+ )
parser.addoption(
"--only-sbml",
help="Run only SBML tests",
@@ -44,7 +46,10 @@ def pytest_generate_tests(metafunc):
"""Parameterize tests"""
# Run for all PEtab test suite cases
- if "case" in metafunc.fixturenames and "model_type" in metafunc.fixturenames:
+ if (
+ "case" in metafunc.fixturenames
+ and "model_type" in metafunc.fixturenames
+ ):
# Get CLI option
cases = metafunc.config.getoption("--petab-cases")
if cases:
@@ -59,7 +64,9 @@ def pytest_generate_tests(metafunc):
(case, "sbml", version)
for version in ("v1.0.0", "v2.0.0")
for case in (
- test_numbers if test_numbers else get_cases("sbml", version=version)
+ test_numbers
+ if test_numbers
+ else get_cases("sbml", version=version)
)
]
elif metafunc.config.getoption("--only-pysb"):
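For context on the hunk above: pytest_generate_tests is a collection-time hook; whenever a test requests the case/model_type/version fixtures, it is parametrized over the computed tuples. A reduced, self-contained sketch (case IDs invented):

```python
# Reduced sketch of the parametrization pattern; lives in a conftest.py.
def pytest_generate_tests(metafunc):
    if (
        "case" in metafunc.fixturenames
        and "model_type" in metafunc.fixturenames
    ):
        argvalues = [
            (case, "sbml", version)
            for version in ("v1.0.0", "v2.0.0")
            for case in ("0001", "0002")  # hypothetical case IDs
        ]
        metafunc.parametrize("case,model_type,version", argvalues)
```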
diff --git a/tests/petab_test_suite/test_petab_suite.py b/tests/petab_test_suite/test_petab_suite.py
index 59e2ce7723..35ee3adcfc 100755
--- a/tests/petab_test_suite/test_petab_suite.py
+++ b/tests/petab_test_suite/test_petab_suite.py
@@ -57,7 +57,9 @@ def _test_case(case, model_type, version):
# compile amici model
if case.startswith("0006"):
petab.flatten_timepoint_specific_output_overrides(problem)
- model_name = f"petab_{model_type}_test_case_{case}" f"_{version.replace('.', '_')}"
+ model_name = (
+ f"petab_{model_type}_test_case_{case}" f"_{version.replace('.', '_')}"
+ )
model_output_dir = f"amici_models/{model_name}"
model = import_petab_problem(
petab_problem=problem,
@@ -79,9 +81,13 @@ def _test_case(case, model_type, version):
rdatas = ret["rdatas"]
chi2 = sum(rdata["chi2"] for rdata in rdatas)
llh = ret["llh"]
- simulation_df = rdatas_to_measurement_df(rdatas, model, problem.measurement_df)
+ simulation_df = rdatas_to_measurement_df(
+ rdatas, model, problem.measurement_df
+ )
petab.check_measurement_df(simulation_df, problem.observable_df)
- simulation_df = simulation_df.rename(columns={petab.MEASUREMENT: petab.SIMULATION})
+ simulation_df = simulation_df.rename(
+ columns={petab.MEASUREMENT: petab.SIMULATION}
+ )
simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)
solution = petabtests.load_solution(case, model_type, version=version)
gt_chi2 = solution[petabtests.CHI2]
@@ -109,17 +115,26 @@ def _test_case(case, model_type, version):
)
if not simulations_match:
with pd.option_context(
- "display.max_rows", None, "display.max_columns", None, "display.width", 200
+ "display.max_rows",
+ None,
+ "display.max_columns",
+ None,
+ "display.width",
+ 200,
):
logger.log(
logging.DEBUG,
- f"x_ss: {model.getStateIds()} " f"{[rdata.x_ss for rdata in rdatas]}",
+ f"x_ss: {model.getStateIds()} "
+ f"{[rdata.x_ss for rdata in rdatas]}",
+ )
+ logger.log(
+ logging.ERROR, f"Expected simulations:\n{gt_simulation_dfs}"
)
- logger.log(logging.ERROR, f"Expected simulations:\n{gt_simulation_dfs}")
logger.log(logging.ERROR, f"Actual simulations:\n{simulation_df}")
logger.log(
logging.DEBUG if chi2s_match else logging.ERROR,
- f"CHI2: simulated: {chi2}, expected: {gt_chi2}," f" match = {chi2s_match}",
+ f"CHI2: simulated: {chi2}, expected: {gt_chi2},"
+ f" match = {chi2s_match}",
)
logger.log(
logging.DEBUG if simulations_match else logging.ERROR,
@@ -130,7 +145,9 @@ def _test_case(case, model_type, version):
if not all([llhs_match, simulations_match]) or not chi2s_match:
logger.error(f"Case {case} failed.")
- raise AssertionError(f"Case {case}: Test results do not match " "expectations")
+ raise AssertionError(
+ f"Case {case}: Test results do not match " "expectations"
+ )
logger.info(f"Case {case} passed.")
@@ -159,7 +176,9 @@ def check_derivatives(
)
for edata in create_parameterized_edatas(
- amici_model=model, petab_problem=problem, problem_parameters=problem_parameters
+ amici_model=model,
+ petab_problem=problem,
+ problem_parameters=problem_parameters,
):
# check_derivatives does currently not support parameters in ExpData
model.setParameters(edata.parameters)
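The pd.option_context wrapping reformatted earlier in this file is purely cosmetic; the options apply only inside the with-block, which keeps the failure logs complete without changing global pandas state:

```python
# Display options are scoped to the with-block and restored afterwards.
import pandas as pd

df = pd.DataFrame({"x": range(100)})
with pd.option_context(
    "display.max_rows", None,
    "display.max_columns", None,
    "display.width", 200,
):
    print(df)  # all 100 rows
print(df)  # default truncated view again
```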
diff --git a/tests/testSBMLSuite.py b/tests/testSBMLSuite.py
index f11870b60d..cfad477ac4 100755
--- a/tests/testSBMLSuite.py
+++ b/tests/testSBMLSuite.py
@@ -44,7 +44,9 @@ def sbml_test_dir():
sys.path = old_path
-def test_sbml_testsuite_case(test_number, result_path, sbml_semantic_cases_dir):
+def test_sbml_testsuite_case(
+ test_number, result_path, sbml_semantic_cases_dir
+):
test_id = format_test_id(test_number)
model_dir = None
@@ -67,7 +69,8 @@ def test_sbml_testsuite_case(test_number, result_path, sbml_semantic_cases_dir):
results_file = current_test_path / f"{test_id}-results.csv"
results = pd.read_csv(results_file, delimiter=",")
results.rename(
- columns={c: c.replace(" ", "") for c in results.columns}, inplace=True
+ columns={c: c.replace(" ", "") for c in results.columns},
+ inplace=True,
)
# setup model
@@ -91,7 +94,9 @@ def test_sbml_testsuite_case(test_number, result_path, sbml_semantic_cases_dir):
raise RuntimeError("Simulation failed unexpectedly")
# verify
- simulated = verify_results(settings, rdata, results, wrapper, model, atol, rtol)
+ simulated = verify_results(
+ settings, rdata, results, wrapper, model, atol, rtol
+ )
# record results
write_result_file(simulated, test_id, result_path)
@@ -116,7 +121,10 @@ def verify_results(settings, rdata, expected, wrapper, model, atol, rtol):
# collect states
simulated = pd.DataFrame(
rdata["y"],
- columns=[obs["name"] for obs in wrapper.symbols[SymbolId.OBSERVABLE].values()],
+ columns=[
+ obs["name"]
+ for obs in wrapper.symbols[SymbolId.OBSERVABLE].values()
+ ],
)
simulated["time"] = rdata["ts"]
# collect parameters
@@ -128,14 +136,18 @@ def verify_results(settings, rdata, expected, wrapper, model, atol, rtol):
simulated[expr_id.removeprefix("flux_")] = rdata.w[:, expr_idx]
# handle renamed reserved symbols
simulated.rename(
- columns={c: c.replace("amici_", "") for c in simulated.columns}, inplace=True
+ columns={c: c.replace("amici_", "") for c in simulated.columns},
+ inplace=True,
)
# SBML test suite case 01308 defines species with initialAmount and
    # hasOnlySubstanceUnits="true", but then requests results as concentrations.
requested_concentrations = [
s
- for s in settings["concentration"].replace(" ", "").replace("\n", "").split(",")
+ for s in settings["concentration"]
+ .replace(" ", "")
+ .replace("\n", "")
+ .split(",")
if s
]
# We only need to convert species that have only substance units
@@ -145,7 +157,8 @@ def verify_results(settings, rdata, expected, wrapper, model, atol, rtol):
**wrapper.symbols[SymbolId.SPECIES],
**wrapper.symbols[SymbolId.ALGEBRAIC_STATE],
}.items()
- if str(state_id) in requested_concentrations and state.get("amount", False)
+ if str(state_id) in requested_concentrations
+ and state.get("amount", False)
]
amounts_to_concentrations(
concentration_species, wrapper, simulated, requested_concentrations
@@ -218,7 +231,9 @@ def concentrations_to_amounts(
# Species with OnlySubstanceUnits don't have to be converted as long
# as we don't request concentrations for them. Only applies when
# called from amounts_to_concentrations.
- if (is_amt and species not in requested_concentrations) or comp is None:
+ if (
+ is_amt and species not in requested_concentrations
+ ) or comp is None:
continue
simulated.loc[:, species] *= simulated.loc[
@@ -226,7 +241,9 @@ def concentrations_to_amounts(
]
-def write_result_file(simulated: pd.DataFrame, test_id: str, result_path: Path):
+def write_result_file(
+ simulated: pd.DataFrame, test_id: str, result_path: Path
+):
"""
Create test result file for upload to
http://raterule.caltech.edu/Facilities/Database
@@ -243,10 +260,14 @@ def get_amount_and_variables(settings):
"""Read amount and species from settings file"""
# species for which results are expected as amounts
- amount_species = settings["amount"].replace(" ", "").replace("\n", "").split(",")
+ amount_species = (
+ settings["amount"].replace(" ", "").replace("\n", "").split(",")
+ )
# IDs of all variables for which results are expected/provided
- variables = settings["variables"].replace(" ", "").replace("\n", "").split(",")
+ variables = (
+ settings["variables"].replace(" ", "").replace("\n", "").split(",")
+ )
return amount_species, variables
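On the settings parsing above: stripping blanks and newlines before splitting on commas means an empty field still yields [""], which is why the concentration list in verify_results filters with `if s`. A sketch with hypothetical species IDs:

```python
# Hypothetical settings entries, mimicking the SBML test suite format.
settings = {"amount": "S1, S2,\nS3", "concentration": ""}

amount_species = (
    settings["amount"].replace(" ", "").replace("\n", "").split(",")
)
print(amount_species)  # ['S1', 'S2', 'S3']

requested_concentrations = [
    s
    for s in settings["concentration"]
    .replace(" ", "")
    .replace("\n", "")
    .split(",")
    if s
]
print(requested_concentrations)  # [] -- "".split(",") gives [""]
```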
diff --git a/version.txt b/version.txt
index 1cf0537c34..5a03fb737b 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-0.19.0
+0.20.0