diff --git a/.clog.toml b/.clog.toml
index 698919671..d0796b7f1 100644
--- a/.clog.toml
+++ b/.clog.toml
@@ -1,4 +1,4 @@
[clog]
repository = "https://github.com/PolyJIT/benchbuild"
-changelog = "docs/CHANGELOG.md"
+changelog = "docs/source/CHANGELOG.md"
from-latest-tag = true
diff --git a/.coveragerc b/.coveragerc
index 13bb18d98..3ababd737 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -7,6 +7,7 @@ branch = True
parallel = True
+data_file = ${BB_COVERAGE_PATH-.}/.coverage
 [paths]
source =
benchbuild/
*/site-packages/benchbuild/
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 00bc8ba1b..43c301594 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,13 +10,13 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.7", "3.8", "3.9", "3.10"]
+ python-version: ["3.9", "3.10"]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
@@ -53,7 +53,8 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.7", "3.8", "3.9", "3.10"]
+ python-version: ["3.9", "3.10"]
+ db_support: [true, false]
steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
@@ -76,21 +77,29 @@ jobs:
- name: Run integration tests
env:
- BB_CONTAINER_ROOT: '/tmp'
- BB_CONTAINER_RUNROOT: '/tmp'
+ BB_DB_ENABLED: ${{ matrix.db_support }}
+ BB_DB_CONNECT_STRING: "sqlite://"
+ BB_COVERAGE_COLLECT: true
+ BB_COVERAGE_PATH: ${{ github.workspace }}
+ BB_COVERAGE_CONFIG: ${{ github.workspace }}/.coveragerc
+ BB_CONTAINER_ROOT: ${{ runner.temp }}
+ BB_CONTAINER_RUNROOT: ${{ runner.temp }}
+ BB_VERBOSITY: 5
run: |
coverage run -p `which benchbuild` bootstrap -s
coverage run -p `which benchbuild` config view
coverage run -p `which benchbuild` experiment view
coverage run -p `which benchbuild` project view
- coverage run -p `which benchbuild` -vvvvv run --full test
- coverage run -p `which benchbuild` -vvvvv slurm -E empty test
- coverage run -p `which benchbuild` -vvvvv slurm -E raw bzip2/benchbuild -- container --runroot /foo/bar
+ coverage run -p `which benchbuild` run --full test
+ coverage run -p `which benchbuild` slurm -E empty test
+ coverage run -p `which benchbuild` slurm -E raw bzip2/benchbuild -- container --runroot /foo/bar
+ coverage run -p `which benchbuild` run -E raw bzip2/benchbuild
+ coverage run -p `which benchbuild` container run -E raw bzip2/benchbuild
coverage run -p `which benchbuild` container bases --export -E raw bzip2/benchbuild
coverage run -p `which benchbuild` container bases --import -E raw bzip2/benchbuild
- coverage run -p `which benchbuild` container run -E raw bzip2/benchbuild
coverage run -p `which benchbuild` container rmi --with-projects -E raw bzip2/benchbuild
coverage combine
+ coverage report -m
- uses: actions/upload-artifact@master
with:
@@ -143,12 +152,12 @@ jobs:
python-version: ["3.10"]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
@@ -161,14 +170,4 @@ jobs:
run: |
python -m pip install --upgrade pip
pip install .
- pip install -r requirements.txt
- pip install -r doc-requirements.txt
-
- - name: Build documentation
- run: |
- mkdocs build
-
- - name: Deploy documentation
- if: ${{ github.event_name == 'push' }}
- run: |
- mkdocs gh-deploy
+ pip install -r docs/requirements.txt
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 000000000..5b91c2029
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,148 @@
+name: sphinx
+on: [push, pull_request]
+
+env:
+ DEFAULT_BRANCH: "master"
+ #SPHINXOPTS: "-W --keep-going -T"
+ # ^-- If these SPHINXOPTS are enabled, then be strict about the builds and fail on any warnings
+
+jobs:
+ build-and-deploy:
+ name: Build and gh-pages
+ runs-on: ubuntu-latest
+ steps:
+ # https://github.com/marketplace/actions/checkout
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ lfs: true
+
+ # https://github.com/marketplace/actions/setup-python
+ # ^-- This gives info on matrix testing.
+ - name: Install Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+
+ # https://docs.github.com/en/actions/guides/building-and-testing-python#caching-dependencies
+ # ^-- How to set up caching for pip on Ubuntu
+ - name: Cache pip
+        uses: actions/cache@v3
+ with:
+ path: ~/.cache/pip
+ key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
+ restore-keys: |
+ ${{ runner.os }}-pip-
+ ${{ runner.os }}-
+
+ # https://docs.github.com/en/actions/guides/building-and-testing-python#installing-dependencies
+ # ^-- This gives info on installing dependencies with pip
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ pip install -r docs/requirements.txt
+ pip install .
+
+ - name: Debugging information
+ run: |
+ echo "github.ref:" ${{github.ref}}
+ echo "github.event_name:" ${{github.event_name}}
+ echo "github.head_ref:" ${{github.head_ref}}
+ echo "github.base_ref:" ${{github.base_ref}}
+ set -x
+ git rev-parse --abbrev-ref HEAD
+ git branch
+ git branch -a
+ git remote -v
+ python -V
+ pip list --not-required
+ pip list
+
+ # Build
+ - uses: ammaraskar/sphinx-problem-matcher@master
+ - name: Build Sphinx docs
+ run: |
+ make -Cdocs dirhtml
+ # This fixes broken copy button icons, as explained in
+ # https://github.com/coderefinery/sphinx-lesson/issues/50
+ # https://github.com/executablebooks/sphinx-copybutton/issues/110
+ # This can be removed once these PRs are accepted (but the
+ # fixes also need to propagate to other themes):
+ # https://github.com/sphinx-doc/sphinx/pull/8524
+ # https://github.com/readthedocs/sphinx_rtd_theme/pull/1025
+ sed -i 's/url_root="#"/url_root=""/' docs/build/dirhtml/index.html || true
+ # The following supports building all branches and combining on
+ # gh-pages
+
+ # Clone and set up the old gh-pages branch
+ - name: Clone old gh-pages
+ if: ${{ github.event_name == 'push' }}
+ run: |
+ set -x
+ git fetch
+ ( git branch gh-pages remotes/origin/gh-pages && git clone . --branch=gh-pages _gh-pages/ ) || mkdir _gh-pages
+ rm -rf _gh-pages/.git/
+ mkdir -p _gh-pages/branch/
+
+ # If a push and default branch, copy build to _gh-pages/ as the "main"
+ # deployment.
+ - name: Copy new build (default branch)
+ if: |
+ contains(github.event_name, 'push') &&
+ contains(github.ref, env.DEFAULT_BRANCH)
+ run: |
+ set -x
+ # Delete everything under _gh-pages/ that is from the
+          # primary branch deployment. Excludes the other branches
+ # _gh-pages/branch-* paths, and not including
+ # _gh-pages itself.
+ find _gh-pages/ -mindepth 1 ! -path '_gh-pages/branch*' -delete
+ rsync -a docs/build/dirhtml/ _gh-pages/
+
+ # If a push and not on default branch, then copy the build to
+ # _gh-pages/branch/$brname (transforming '/' into '--')
+ - name: Copy new build (branch)
+ if: |
+ contains(github.event_name, 'push') &&
+ !contains(github.ref, env.DEFAULT_BRANCH)
+ run: |
+ set -x
+ #brname=$(git rev-parse --abbrev-ref HEAD)
+ brname="${{github.ref}}"
+ brname="${brname##refs/heads/}"
+ brdir=${brname//\//--} # replace '/' with '--'
+ rm -rf _gh-pages/branch/${brdir}
+ rsync -a docs/build/dirhtml/ _gh-pages/branch/${brdir}
+
+ # Go through each branch in _gh-pages/branch/, if it's not a
+ # ref, then delete it.
+ - name: Delete old feature branches
+ if: ${{ github.event_name == 'push' }}
+ run: |
+ set -x
+ for brdir in `ls _gh-pages/branch/` ; do
+ brname=${brdir//--/\/} # replace '--' with '/'
+ if ! git show-ref remotes/origin/$brname ; then
+ echo "Removing $brdir"
+ rm -r _gh-pages/branch/$brdir/
+ fi
+ done
+
+ # Add the .nojekyll file
+ - name: nojekyll
+ if: ${{ github.event_name == 'push' }}
+ run: |
+ touch _gh-pages/.nojekyll
+
+ # Deploy
+ # https://github.com/peaceiris/actions-gh-pages
+ - name: Deploy
+ uses: peaceiris/actions-gh-pages@v3
+ if: ${{ github.event_name == 'push' }}
+ #if: ${{ success() && github.event_name == 'push' && github.ref == 'refs/heads/$defaultBranch' }}
+ with:
+ publish_branch: gh-pages
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: _gh-pages/
+ force_orphan: true
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index 61f7e0627..4a2e2e605 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -5,12 +5,12 @@ jobs:
name: reviewdog
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- - name: Set up Python 3.8
- uses: actions/setup-python@v2
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v4
with:
- python-version: 3.8
+ python-version: "3.10"
- name: Install dependencies
run: |
diff --git a/.isort.cfg b/.isort.cfg
index d423d3a30..db45450ee 100644
--- a/.isort.cfg
+++ b/.isort.cfg
@@ -1,5 +1,5 @@
[settings]
-known_third_party = attr,dill,faker,git,jinja2,migrate,mock,parse,pathos,pkg_resources,plumbum,psutil,pygit2,pygtrie,pyparsing,pytest,pytest_git,result,rich,schema,setuptools,six,sqlalchemy,yaml
+known_third_party = attr,dill,faker,git,jinja2,mock,parse,pkg_resources,plumbum,psutil,pygtrie,pyparsing,pytest,pytest_git,result,rich,schema,setuptools,six,sqlalchemy,yaml
multi_line_output=3
use_parentheses = True
include_trailing_comma: True
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fd052b087..b2c0eacfb 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,6 +20,10 @@ repos:
hooks:
- id: isort
args: ['-nis']
+- repo: https://github.com/MarcoGorelli/auto-walrus
+ rev: v0.2.2
+ hooks:
+ - id: auto-walrus
- repo: https://github.com/pre-commit/mirrors-yapf
rev: 'v0.32.0'
hooks:
diff --git a/.pylintrc b/.pylintrc
index ca0a04832..5bf0efc70 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -263,4 +263,4 @@ int-import-graph=
[EXCEPTIONS]
-overgeneral-exceptions=Exception
+overgeneral-exceptions=builtins.Exception
diff --git a/benchbuild/__init__.py b/benchbuild/__init__.py
index 0544e305d..9ff2ae338 100644
--- a/benchbuild/__init__.py
+++ b/benchbuild/__init__.py
@@ -28,8 +28,8 @@
def __init__() -> None:
"""Initialize all plugins and settings."""
- __PLUGINS__.discover()
- __SETTINGS__.setup_config(CFG)
+ if __PLUGINS__.discover():
+ __SETTINGS__.setup_config(CFG)
__init__()
diff --git a/benchbuild/command.py b/benchbuild/command.py
index 502652b12..a9ccd3045 100644
--- a/benchbuild/command.py
+++ b/benchbuild/command.py
@@ -2,9 +2,9 @@
import shutil
import sys
import typing as tp
-from collections.abc import Set
from contextlib import contextmanager
from pathlib import Path
+from typing import Protocol, runtime_checkable
from plumbum import local
from plumbum.commands.base import BoundEnvCommand
@@ -18,11 +18,6 @@
if tp.TYPE_CHECKING:
import benchbuild.project.Project # pylint: disable=unused-import
-if sys.version_info <= (3, 8):
- from typing_extensions import Protocol, runtime_checkable
-else:
- from typing import Protocol, runtime_checkable
-
LOG = logging.getLogger(__name__)
@@ -159,8 +154,7 @@ def rendered(
LOG.error("Cannot render a source directory without a project.")
return Path(self.unrendered)
- src_path = project.source_of(self.local)
- if src_path:
+ if (src_path := project.source_of(self.local)):
return Path(src_path)
return Path(project.builddir) / self.local
@@ -516,8 +510,8 @@ def env(self, **kwargs: str) -> None:
self._env.update(kwargs)
@property
- def label(self) -> tp.Optional[str]:
- return self._label
+ def label(self) -> str:
+ return self._label if self._label else self.name
@label.setter
def label(self, new_label: str) -> None:
@@ -594,7 +588,7 @@ def __str__(self) -> str:
command_str = f"{env_str} {command_str}"
if args_str:
command_str = f"{command_str} {args_str}"
- if self.label:
+ if self._label:
command_str = f"{self._label} {command_str}"
return command_str
@@ -647,12 +641,6 @@ def __str__(self) -> str:
def _is_relative_to(p: Path, other: Path) -> bool:
- if sys.version_info < (3, 9):
- try:
- p.relative_to(other)
- return True
- except ValueError:
- return False
return p.is_relative_to(other)
@@ -775,6 +763,6 @@ def filter_workload_index(
This removes all command lists from the index not matching `only`.
"""
- keys = {k for k in index if k and ((only and (k & only)) or (not only))}
+ keys = [k for k in index if k and ((only and (k & only)) or (not only))]
for k in keys:
yield index[k]
diff --git a/benchbuild/db/__init__.py b/benchbuild/db/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/benchbuild/db/manage.py b/benchbuild/db/manage.py
deleted file mode 100755
index 39fa3892e..000000000
--- a/benchbuild/db/manage.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python
-from migrate.versioning.shell import main
-
-if __name__ == '__main__':
- main(debug='False')
diff --git a/benchbuild/db/migrate.cfg b/benchbuild/db/migrate.cfg
deleted file mode 100644
index d6b0b9bd2..000000000
--- a/benchbuild/db/migrate.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-[db_settings]
-repository_id=Benchbuild DB Schema
-version_table=migrate_version
-required_dbs=['postgres']
-use_timestamp_numbering=False
diff --git a/benchbuild/db/versions/001_Remove_RegressionTest_table.py b/benchbuild/db/versions/001_Remove_RegressionTest_table.py
deleted file mode 100644
index ac216e8fe..000000000
--- a/benchbuild/db/versions/001_Remove_RegressionTest_table.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""
-Remove unneeded Regressions table.
-
-This table can and should be reintroduced by an experiment that requires it.
-"""
-import sqlalchemy as sa
-from sqlalchemy import Column, ForeignKey, Integer, String, Table
-
-from benchbuild.utils.schema import exceptions, metadata
-
-META = metadata()
-REGRESSION = Table(
- 'regressions', META,
- Column('run_id',
- Integer,
- ForeignKey('run.id', onupdate="CASCADE", ondelete="CASCADE"),
- index=True,
- primary_key=True), Column('name', String), Column('module', String),
- Column('project_name', String))
-
-
-def upgrade(migrate_engine):
-
- @exceptions(
- error_is_fatal=False,
- error_messages={
- sa.exc.ProgrammingError:
- "Removing table 'Regressions' failed. Please delete the table manually"
- })
- def do_upgrade():
- META.bind = migrate_engine
- REGRESSION.drop()
-
- do_upgrade()
-
-
-def downgrade(migrate_engine):
-
- @exceptions(error_messages={
- sa.exc.ProgrammingError: "Adding table 'Regressions' failed."
- })
- def do_downgrade():
- META.bind = migrate_engine
- REGRESSION.create()
-
- do_downgrade()
diff --git a/benchbuild/db/versions/002_Remove_GlobalConfig_table.py b/benchbuild/db/versions/002_Remove_GlobalConfig_table.py
deleted file mode 100644
index 827d65b53..000000000
--- a/benchbuild/db/versions/002_Remove_GlobalConfig_table.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""
-Remove unneeded GlobalConfig table.
-
-This table can and should be reintroduced by an experiment that requires it.
-"""
-
-import sqlalchemy as sa
-from sqlalchemy import Column, ForeignKey, String, Table
-
-from benchbuild.utils.schema import GUID, exceptions, metadata
-
-META = metadata()
-GLOBAL = Table(
- 'globalconfig', META,
- Column('experiment_group',
- GUID(as_uuid=True),
- ForeignKey('experiment.id', onupdate="CASCADE", ondelete="CASCADE"),
- primary_key=True), Column('name', String, primary_key=True),
- Column('value', String))
-
-
-def upgrade(migrate_engine):
-
- @exceptions(
- error_is_fatal=False,
- error_messages={
- sa.exc.ProgrammingError:
- "Removing table 'globalconfig' failed. Please delete the table manually"
- })
- def do_upgrade():
- META.bind = migrate_engine
- GLOBAL.drop()
-
- do_upgrade()
-
-
-def downgrade(migrate_engine):
-
- @exceptions(error_messages={
- sa.exc.ProgrammingError: "Adding table 'globalconfig' failed."
- })
- def do_downgrade():
- META.bind = migrate_engine
- GLOBAL.create()
-
- do_downgrade()
diff --git a/benchbuild/db/versions/003_Unmanage_Events.py b/benchbuild/db/versions/003_Unmanage_Events.py
deleted file mode 100644
index 33e2e7ed4..000000000
--- a/benchbuild/db/versions/003_Unmanage_Events.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-Remove 'benchbuild_events' from the managed part of the schema.
-
-We do not delete this table during our upgrades,
-because we do not want to wipe measurement data.
-
-During downgrade we will make sure to create the table as needed.
-"""
-import sqlalchemy as sa
-from sqlalchemy import (BigInteger, Column, ForeignKey, Integer, MetaData,
- Numeric, SmallInteger, String, Table)
-
-from benchbuild.utils.schema import exceptions
-
-META = MetaData()
-
-# yapf: disable
-EVENTS = Table('benchbuild_events', META,
- Column('name', String, index=True),
- Column('start', Numeric, primary_key=True),
- Column('duration', Numeric),
- Column('id', Integer),
- Column('type', SmallInteger),
- Column('tid', BigInteger),
- Column('run_id', Integer,
- ForeignKey('run.id',
- onupdate='CASCADE', ondelete='CASCADE'),
- nullable=False, index=True, primary_key=True),
- extend_existing=True)
-# yapf: enable
-
-
-def upgrade(migrate_engine):
- pass
-
-
-def downgrade(migrate_engine):
-
- @exceptions(error_messages={
- sa.exc.ProgrammingError: "Adding table 'benchbuild_events' failed."
- })
- def do_downgrade():
- META.bind = migrate_engine
- EVENTS.create()
diff --git a/benchbuild/db/versions/__init__.py b/benchbuild/db/versions/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/benchbuild/environments/adapters/buildah.py b/benchbuild/environments/adapters/buildah.py
index b26a335ab..4897118a7 100644
--- a/benchbuild/environments/adapters/buildah.py
+++ b/benchbuild/environments/adapters/buildah.py
@@ -304,8 +304,7 @@ def find(self, tag: str) -> model.MaybeImage:
if tag in self.images:
return self.images[tag]
- image = self._find(tag)
- if image:
+ if (image := self._find(tag)):
self.images[tag] = image
return image
@@ -389,8 +388,7 @@ def _env(self, tag: str, name: str) -> tp.Optional[str]:
raise NotImplementedError
def temporary_mount(self, tag: str, source: str, target: str) -> None:
- image = self.find(tag)
- if image:
+ if (image := self.find(tag)):
image.mounts.append(model.Mount(source, target))
@@ -449,8 +447,7 @@ def _find(self, tag: str) -> model.MaybeImage:
LOG.debug("Could not find the image %s", tag)
return None
- results = res.unwrap()
- if results:
+ if (results := res.unwrap()):
json_results = json.loads(results)
if json_results:
#json_image = json_results.pop(0)
@@ -461,8 +458,7 @@ def _find(self, tag: str) -> model.MaybeImage:
return None
def _env(self, tag: str, name: str) -> tp.Optional[str]:
- image = self.find(tag)
- if image:
+ if (image := self.find(tag)):
return image.env.get(name)
return None
diff --git a/benchbuild/environments/adapters/common.py b/benchbuild/environments/adapters/common.py
index 46e84ab39..6b5159da3 100644
--- a/benchbuild/environments/adapters/common.py
+++ b/benchbuild/environments/adapters/common.py
@@ -49,6 +49,7 @@ def path_longer_than_50_chars(path: str) -> bool:
def wrapped_cmd(*args: str) -> BaseCommand:
root = CFG['container']['root']
runroot = CFG['container']['runroot']
+ storage_driver = CFG['container']['storage_driver']
if path_longer_than_50_chars(str(root)):
LOG.error(
@@ -60,7 +61,10 @@ def wrapped_cmd(*args: str) -> BaseCommand:
'%s - %s', runroot.__to_env_var__(), __MSG_SHORTER_PATH_REQUIRED
)
- opts = ['--root', root, '--runroot', runroot]
+ opts = [
+ '--root', root, '--runroot', runroot,
+ '--storage-driver', storage_driver
+ ]
cmd = base[opts]
return cmd[args]
diff --git a/benchbuild/environments/adapters/podman.py b/benchbuild/environments/adapters/podman.py
index 84dbb3554..72a1e70a3 100644
--- a/benchbuild/environments/adapters/podman.py
+++ b/benchbuild/environments/adapters/podman.py
@@ -5,7 +5,6 @@
from plumbum import local, ProcessExecutionError
from result import Result, Err, Ok
from rich import print
-from rich.markdown import Markdown
from benchbuild.environments.adapters import buildah
from benchbuild.environments.adapters.common import (
@@ -84,8 +83,7 @@ def find(self, container_id: str) -> model.MaybeContainer:
if container_id in self.containers:
return self.containers[container_id]
- container = self._find(container_id)
- if container is not None:
+ if (container := self._find(container_id)) is not None:
self.containers[container_id] = container
return container
@@ -166,13 +164,15 @@ def _create(
for mount in mounts:
create_cmd = create_cmd['--mount', mount]
- cfg_mounts = list(CFG['container']['mounts'].value)
- if cfg_mounts:
+ if (cfg_mounts := list(CFG['container']['mounts'].value)):
for source, target in cfg_mounts:
create_cmd = create_cmd[
'--mount', f'type=bind,src={source},target={target}']
if interactive:
+ # pylint: disable=import-outside-toplevel
+ from rich.markdown import Markdown
+
entrypoint = buildah.find_entrypoint(image.name)
print(
Markdown(
diff --git a/benchbuild/environments/domain/declarative.py b/benchbuild/environments/domain/declarative.py
index 467b43cc7..5c2ea1582 100644
--- a/benchbuild/environments/domain/declarative.py
+++ b/benchbuild/environments/domain/declarative.py
@@ -3,21 +3,21 @@
full control about the environment your [projects](/concepts/projects/) and
[experiments](/concepts/experiments/) may run in.
-The following example uses the latest ``alpine:latest``
-
-Example:
-```python
-ContainerImage().from_('alpine:latest')
- .run('apk', 'update')
- .run('apk', 'add', 'python3')
-```
+The following example uses the latest ``alpine:latest``:
+
+.. code-block:: python
+
+ ContainerImage().from_('alpine:latest')
+ .run('apk', 'update')
+ .run('apk', 'add', 'python3')
+
"""
import logging
import typing as tp
-from benchbuild.settings import CFG
from benchbuild.environments.adapters.common import buildah_version
+from benchbuild.settings import CFG
from . import model
@@ -169,7 +169,7 @@ def command(self, *args: str) -> 'ContainerImage':
DEFAULT_BASES: tp.Dict[str, ContainerImage] = {
'benchbuild:alpine': ContainerImage() \
- .from_("docker.io/alpine:edge") \
+ .from_("docker.io/alpine:3.17") \
.run('apk', 'update') \
.run('apk', 'add', 'python3', 'python3-dev', 'postgresql-dev',
'linux-headers', 'musl-dev', 'git', 'gcc', 'g++',
diff --git a/benchbuild/environments/service_layer/debug.py b/benchbuild/environments/service_layer/debug.py
index d6bea6265..d08d0b070 100644
--- a/benchbuild/environments/service_layer/debug.py
+++ b/benchbuild/environments/service_layer/debug.py
@@ -1,6 +1,5 @@
from plumbum import ProcessExecutionError
from rich import print
-from rich.markdown import Markdown
from benchbuild.environments.adapters.common import bb_buildah
from benchbuild.environments.domain import events
@@ -25,6 +24,8 @@ def debug_image_kept(
"""
Spawn a debug session of the kept image and provide diagnostics.
"""
+ # pylint: disable=import-outside-toplevel
+ from rich.markdown import Markdown
with uow:
container = uow.create(event.image_name, event.failed_image_name)
if container is None:
diff --git a/benchbuild/environments/service_layer/unit_of_work.py b/benchbuild/environments/service_layer/unit_of_work.py
index 57ee38d9c..b32efeb93 100644
--- a/benchbuild/environments/service_layer/unit_of_work.py
+++ b/benchbuild/environments/service_layer/unit_of_work.py
@@ -2,6 +2,7 @@
import logging
import sys
import typing as tp
+from typing import Protocol
from plumbum import local
from plumbum.path.utils import delete
@@ -10,11 +11,6 @@
from benchbuild.environments.adapters import common, buildah, podman
from benchbuild.environments.domain import model, events
-if sys.version_info <= (3, 8):
- from typing_extensions import Protocol
-else:
- from typing import Protocol
-
LOG = logging.getLogger(__name__)
diff --git a/benchbuild/extensions/compiler.py b/benchbuild/extensions/compiler.py
index ad8d7aadc..5d1498c0a 100644
--- a/benchbuild/extensions/compiler.py
+++ b/benchbuild/extensions/compiler.py
@@ -6,11 +6,12 @@
from plumbum.commands.base import BoundCommand
from benchbuild.extensions import base
+from benchbuild.settings import CFG
from benchbuild.utils import db, run
if TYPE_CHECKING:
- from benchbuild.project import Project
from benchbuild.experiment import Experiment
+ from benchbuild.project import Project
LOG = logging.getLogger(__name__)
@@ -65,9 +66,10 @@ def __call__(
default_flow_style=False
)
)
- db.persist_config(
- run_info.db_run, run_info.session, self.config
- )
+ if CFG["db"]["enabled"]:
+ db.persist_config(
+ run_info.db_run, run_info.session, self.config
+ )
if run_info.has_failed:
with run.track_execution(
diff --git a/benchbuild/extensions/run.py b/benchbuild/extensions/run.py
index ff8ddd963..fbec1ba40 100644
--- a/benchbuild/extensions/run.py
+++ b/benchbuild/extensions/run.py
@@ -5,6 +5,7 @@
from plumbum import local
from benchbuild.extensions import base
+from benchbuild.settings import CFG
from benchbuild.utils import db, run
from benchbuild.utils.settings import get_number_of_jobs
@@ -45,9 +46,10 @@ def __call__(self, binary_command, *args, **kwargs):
)
self.config['baseline'] = \
os.getenv("BB_IS_BASELINE", "False")
- db.persist_config(
- run_info.db_run, run_info.session, self.config
- )
+ if CFG["db"]["enabled"]:
+ db.persist_config(
+ run_info.db_run, run_info.session, self.config
+ )
res = self.call_next(binary_command, *args, **kwargs)
res.append(run_info)
return res
@@ -69,6 +71,7 @@ def __init__(self, *extensions, limit="10m", **kwargs):
self.limit = limit
def __call__(self, binary_command, *args, **kwargs):
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils.cmd import timeout
return self.call_next(
timeout[self.limit, binary_command], *args, **kwargs
@@ -83,8 +86,6 @@ class SetThreadLimit(base.Extension):
"""
def __call__(self, binary_command, *args, **kwargs):
- from benchbuild.settings import CFG
-
config = self.config
if config is not None and 'jobs' in config.keys():
jobs = get_number_of_jobs(config)
diff --git a/benchbuild/extensions/time.py b/benchbuild/extensions/time.py
index 8976adbdd..defcddb6c 100644
--- a/benchbuild/extensions/time.py
+++ b/benchbuild/extensions/time.py
@@ -4,6 +4,7 @@
import parse
from benchbuild.extensions import base
+from benchbuild.settings import CFG
from benchbuild.utils import db
from benchbuild.utils.cmd import time
@@ -20,6 +21,10 @@ def __call__(self, binary_command, *args, may_wrap=True, **kwargs):
def handle_timing(run_infos):
"""Takes care of the formating for the timing statistics."""
+ if not CFG["db"]["enabled"]:
+ return run_infos
+
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils import schema as s
session = s.Session()
diff --git a/benchbuild/plugins.py b/benchbuild/plugins.py
index 5070399f7..386ec9f20 100644
--- a/benchbuild/plugins.py
+++ b/benchbuild/plugins.py
@@ -23,15 +23,19 @@
LOG = logging.getLogger(__name__)
-def discover() -> None:
+def discover() -> bool:
"""Import all plugins listed in our configuration."""
+ something_imported = False
if CFG["plugins"]["autoload"]:
experiment_plugins = CFG["plugins"]["experiments"].value
project_plugins = CFG["plugins"]["projects"].value
for plugin in itertools.chain(experiment_plugins, project_plugins):
+ something_imported = True
try:
importlib.import_module(plugin)
except ImportError as import_error:
LOG.error("Could not find '%s'", import_error.name)
LOG.debug("ImportError: %s", import_error)
+
+ return something_imported
diff --git a/benchbuild/project.py b/benchbuild/project.py
index d51197dbd..462243496 100644
--- a/benchbuild/project.py
+++ b/benchbuild/project.py
@@ -296,8 +296,7 @@ def __default_revision(self) -> source.Revision: # pylint: disable=unused-priva
@run_uuid.default
def __default_run_uuid(self): # pylint: disable=unused-private-member
- run_group = getenv("BB_DB_RUN_GROUP", None)
- if run_group:
+ if (run_group := getenv("BB_DB_RUN_GROUP", None)):
return uuid.UUID(run_group)
return uuid.uuid4()
@@ -336,7 +335,8 @@ def __default_primary_source(self) -> str: # pylint: disable=unused-private-mem
runtime_extension = attr.ib(default=None)
def __attrs_post_init__(self) -> None:
- db.persist_project(self)
+ if CFG["db"]["enabled"]:
+ db.persist_project(self)
# select container image
if isinstance(type(self).CONTAINER, ContainerImage):
@@ -344,18 +344,22 @@ def __attrs_post_init__(self) -> None:
ContainerImage, copy.deepcopy(type(self).CONTAINER)
)
else:
- if not isinstance(primary(*self.SOURCE), Git):
+ primary_source = primary(*self.SOURCE)
+ if isinstance(primary_source, source.BaseVersionFilter):
+ primary_source = primary_source.child
+ if not isinstance(primary_source, Git):
raise AssertionError(
"Container selection by version is only allowed if the"
"primary source is a git repository."
)
version = self.version_of_primary
- cache_path = str(primary(*self.SOURCE).fetch())
+ cache_path = str(primary_source.fetch())
for rev_range, image in type(self).CONTAINER:
rev_range.init_cache(cache_path)
- if version in rev_range:
- self.container = copy.deepcopy(image)
- break
+ for rev in rev_range:
+ if rev.startswith(version):
+ self.container = copy.deepcopy(image)
+ break
def clean(self) -> None:
"""Clean the project build directory."""
@@ -407,8 +411,7 @@ def source_of(self, name: str) -> tp.Optional[str]:
except KeyError:
LOG.debug("%s not found in revision. Skipping.", name)
- all_sources = source.sources_as_dict(*self.source)
- if name in all_sources:
+ if name in (all_sources := source.sources_as_dict(*self.source)):
return str(self.builddir / all_sources[name].local)
return None
diff --git a/benchbuild/res/wrapping/run_compiler.py.inc b/benchbuild/res/wrapping/run_compiler.py.inc
index 2cf9c9610..50d6135b7 100644
--- a/benchbuild/res/wrapping/run_compiler.py.inc
+++ b/benchbuild/res/wrapping/run_compiler.py.inc
@@ -2,10 +2,21 @@
#
import os
import sys
+
+{% if collect_coverage %}
+import coverage
+{% endif %}
+
os.environ["OPENBLAS_NUM_THREADS"] = "4"
from plumbum import TEE, local
+# Performance optimization for benchbuild: don't import any experiments or
+# projects. Everything necessary should be imported when loading (unpickling)
+# the project and the compiler.
+os.environ["BB_PLUGINS_AUTOLOAD"] = "False"
+
+from benchbuild.settings import CFG
from benchbuild.utils import log
from benchbuild.utils.db import persist_project
from benchbuild.utils.run import exit_code_from_run_infos
@@ -24,7 +35,8 @@ def update_project(argv):
name = project_p.basename
break
PROJECT.name = name
- persist_project(PROJECT)
+ if CFG["db"]["enabled"]:
+ persist_project(PROJECT)
def main(argv):
@@ -50,4 +62,18 @@ def main(argv):
if __name__ == "__main__":
- sys.exit(main(sys.argv))
+{% if collect_coverage %}
+ cov = coverage.Coverage(
+ config_file="{{ coverage_config }}",
+ data_file="{{ coverage_path }}/.coverage",
+ data_suffix=True,
+ branch=True
+ )
+ cov.start()
+{% endif %}
+ ret = main(sys.argv)
+{% if collect_coverage %}
+ cov.stop()
+ cov.save()
+{% endif %}
+ sys.exit(ret)
diff --git a/benchbuild/res/wrapping/run_dynamic.py.inc b/benchbuild/res/wrapping/run_dynamic.py.inc
index 40c058f1b..e6d0aa1a1 100644
--- a/benchbuild/res/wrapping/run_dynamic.py.inc
+++ b/benchbuild/res/wrapping/run_dynamic.py.inc
@@ -6,6 +6,15 @@ import sys
from plumbum import TEE, local
+{% if collect_coverage %}
+import coverage
+{% endif %}
+
+# Performance optimization for benchbuild: don't import any experiments or
+# projects. Everything necessary should be imported when loading (unpickling)
+# the project.
+os.environ["BB_PLUGINS_AUTOLOAD"] = "False"
+
from benchbuild.utils import log
from benchbuild.utils.db import persist_project
from benchbuild.utils.run import exit_code_from_run_infos
@@ -48,4 +57,18 @@ def main(argv):
if __name__ == "__main__":
- sys.exit(main(sys.argv))
+{% if collect_coverage %}
+ cov = coverage.Coverage(
+ config_file="{{ coverage_config }}",
+ data_file="{{ coverage_path }}/.coverage",
+ data_suffix=True,
+ branch=True
+ )
+ cov.start()
+{% endif %}
+ ret = main(sys.argv)
+{% if collect_coverage %}
+ cov.stop()
+ cov.save()
+{% endif %}
+ sys.exit(ret)
diff --git a/benchbuild/res/wrapping/run_static.py.inc b/benchbuild/res/wrapping/run_static.py.inc
index f64aac0f2..bd5f1b3a7 100644
--- a/benchbuild/res/wrapping/run_static.py.inc
+++ b/benchbuild/res/wrapping/run_static.py.inc
@@ -1,9 +1,19 @@
#!{{ python|default("/usr/bin/env python3") }}
#
+import os
import sys
+{% if collect_coverage %}
+import coverage
+{% endif %}
+
from plumbum import TEE, local
+# Performance optimization for benchbuild: don't import any experiments or
+# projects. Everything necessary should be imported when loading (unpickling)
+# the project.
+os.environ["BB_PLUGINS_AUTOLOAD"] = "False"
+
from benchbuild.utils import log
from benchbuild.utils.run import exit_code_from_run_infos
from benchbuild.utils.wrapping import load
@@ -32,4 +42,18 @@ def main(argv):
if __name__ == "__main__":
- sys.exit(main(sys.argv))
+{% if collect_coverage %}
+ cov = coverage.Coverage(
+ config_file="{{ coverage_config }}",
+ data_file="{{ coverage_path }}/.coverage",
+ data_suffix=True,
+ branch=True
+ )
+ cov.start()
+{% endif %}
+ ret = main(sys.argv)
+{% if collect_coverage %}
+ cov.stop()
+ cov.save()
+{% endif %}
+ sys.exit(ret)
diff --git a/benchbuild/settings.py b/benchbuild/settings.py
index a55138d5c..f44c1e9de 100644
--- a/benchbuild/settings.py
+++ b/benchbuild/settings.py
@@ -145,6 +145,10 @@
}
CFG['db'] = {
+ "enabled": {
+ "desc": "Whether the database is enabled.",
+ "default": False
+ },
"connect_string": {
"desc": "sqlalchemy connect string",
"default": "sqlite://"
@@ -378,6 +382,10 @@
"default": s.ConfigPath(os.getcwd()),
"desc": "Path to benchbuild's source directory"
},
+ "storage_driver": {
+ "default": "vfs",
+ "desc": "Storage driver for containers."
+ },
"input": {
"default": "container.tar.bz2",
"desc": "Input container file/folder."
@@ -505,5 +513,20 @@
}
}
+CFG["coverage"] = {
+ "collect": {
+ "desc": "Should benchbuild collect coverage inside wrapped binaries.",
+ "default": False
+ },
+ "config": {
+ "desc": "Where is the coverage config?",
+ "default": ".coveragerc"
+ },
+ "path": {
+ "desc": "Where should the coverage files be placed?",
+ "default": None
+ }
+}
+
s.setup_config(CFG)
s.update_env(CFG)
diff --git a/benchbuild/source/base.py b/benchbuild/source/base.py
index 20805bdd2..7a851a8d2 100644
--- a/benchbuild/source/base.py
+++ b/benchbuild/source/base.py
@@ -5,17 +5,13 @@
import itertools
import sys
import typing as tp
+from typing import Protocol
import attr
import plumbum as pb
from benchbuild.settings import CFG
-if sys.version_info <= (3, 8):
- from typing_extensions import Protocol
-else:
- from typing import Protocol
-
if tp.TYPE_CHECKING:
from benchbuild.project import Project
diff --git a/benchbuild/utils/actions.py b/benchbuild/utils/actions.py
index 1ccf7c7bc..bcc1eb091 100644
--- a/benchbuild/utils/actions.py
+++ b/benchbuild/utils/actions.py
@@ -25,8 +25,6 @@
import typing as tp
from datetime import datetime
-import pathos.multiprocessing as mp
-import sqlalchemy as sa
from plumbum import ProcessExecutionError
from benchbuild import command, signals, source
@@ -250,7 +248,7 @@ def clean_mountpoints(root: str) -> None:
root: All UnionFS-mountpoints under this directory will be
unmounted.
"""
- import psutil
+ import psutil # pylint: disable=import-outside-toplevel
umount_paths = []
real_root = os.path.realpath(root)
@@ -317,6 +315,7 @@ def __call__(self) -> StepResult:
except ProcessExecutionError:
self.status = StepResult.ERROR
+ raise
self.status = StepResult.OK
return self.status
@@ -341,22 +340,27 @@ def __init__(
self.experiment = experiment
def __call__(self) -> StepResult:
- group, session = run.begin_run_group(self.project, self.experiment)
- signals.handlers.register(run.fail_run_group, group, session)
+ if CFG["db"]["enabled"]:
+ group, session = run.begin_run_group(self.project, self.experiment)
+ signals.handlers.register(run.fail_run_group, group, session)
try:
self.project.run_tests()
- run.end_run_group(group, session)
+ if CFG["db"]["enabled"]:
+ run.end_run_group(group, session)
self.status = StepResult.OK
except ProcessExecutionError:
- run.fail_run_group(group, session)
+ if CFG["db"]["enabled"]:
+ run.fail_run_group(group, session)
self.status = StepResult.ERROR
raise
except KeyboardInterrupt:
- run.fail_run_group(group, session)
+ if CFG["db"]["enabled"]:
+ run.fail_run_group(group, session)
self.status = StepResult.ERROR
raise
finally:
- signals.handlers.deregister(run.fail_run_group)
+ if CFG["db"]["enabled"]:
+ signals.handlers.deregister(run.fail_run_group)
return self.status
@@ -444,6 +448,7 @@ def __init__(
def begin_transaction(
self,
) -> tp.Tuple["benchbuild.utils.schema.Experiment", tp.Any]:
+ import sqlalchemy as sa # pylint: disable=import-outside-toplevel
experiment, session = db.persist_experiment(self.experiment)
if experiment.begin is None:
experiment.begin = datetime.now()
@@ -467,6 +472,7 @@ def begin_transaction(
def end_transaction(
experiment: "benchbuild.utils.schema.Experiment", session: tp.Any
) -> None:
+ import sqlalchemy as sa # pylint: disable=import-outside-toplevel
try:
experiment.end = max(experiment.end, datetime.now())
session.add(experiment)
@@ -475,6 +481,9 @@ def end_transaction(
LOG.error(inv_req)
def __run_children(self, num_processes: int) -> tp.List[StepResult]:
+ # pylint: disable=import-outside-toplevel
+ import pathos.multiprocessing as mp
+
results = []
actions = self.actions
@@ -496,12 +505,14 @@ def __run_children(self, num_processes: int) -> tp.List[StepResult]:
def __call__(self) -> StepResult:
results = []
session = None
- experiment, session = self.begin_transaction()
+ if CFG["db"]["enabled"]:
+ experiment, session = self.begin_transaction()
try:
results = self.__run_children(int(CFG["parallel_processes"]))
finally:
- self.end_transaction(experiment, session)
- signals.handlers.deregister(self.end_transaction)
+ if CFG["db"]["enabled"]:
+ self.end_transaction(experiment, session)
+ signals.handlers.deregister(self.end_transaction)
self.status = max(results) if results else StepResult.OK
return self.status
@@ -636,22 +647,27 @@ def __init__(
])
def __call__(self) -> StepResult:
- group, session = run.begin_run_group(self.project, self.experiment)
- signals.handlers.register(run.fail_run_group, group, session)
+ if CFG["db"]["enabled"]:
+ group, session = run.begin_run_group(self.project, self.experiment)
+ signals.handlers.register(run.fail_run_group, group, session)
try:
self.status = max([workload() for workload in self.actions],
default=StepResult.OK)
- run.end_run_group(group, session)
+ if CFG["db"]["enabled"]:
+ run.end_run_group(group, session)
except ProcessExecutionError:
- run.fail_run_group(group, session)
+ if CFG["db"]["enabled"]:
+ run.fail_run_group(group, session)
self.status = StepResult.ERROR
raise
except KeyboardInterrupt:
- run.fail_run_group(group, session)
+ if CFG["db"]["enabled"]:
+ run.fail_run_group(group, session)
self.status = StepResult.ERROR
raise
finally:
- signals.handlers.deregister(run.fail_run_group)
+ if CFG["db"]["enabled"]:
+ signals.handlers.deregister(run.fail_run_group)
return self.status
diff --git a/benchbuild/utils/db.py b/benchbuild/utils/db.py
index d8e50e4a3..26adc4e54 100644
--- a/benchbuild/utils/db.py
+++ b/benchbuild/utils/db.py
@@ -1,8 +1,6 @@
"""Database support module for the benchbuild study."""
import logging
-from sqlalchemy.exc import IntegrityError
-
from benchbuild.settings import CFG
LOG = logging.getLogger(__name__)
@@ -39,6 +37,7 @@ def create_run(cmd, project, exp, grp):
The inserted tuple representing the run and the session opened with
the new run. Don't forget to commit it at some point.
"""
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils import schema as s
session = s.Session()
@@ -73,6 +72,7 @@ def create_run_group(prj, experiment):
A tuple (group, session) containing both the newly created run_group and
the transaction object.
"""
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils import schema as s
session = s.Session()
@@ -137,6 +137,9 @@ def persist_experiment(experiment):
Args:
experiment: The experiment we want to persist.
"""
+ # pylint: disable=import-outside-toplevel
+ from sqlalchemy.exc import IntegrityError
+
from benchbuild.utils.schema import Experiment, Session
session = Session()
@@ -177,6 +180,7 @@ def persist_time(run, session, timings):
session: The db transaction we belong to.
timings: The timing measurements we want to store.
"""
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils import schema as s
for timing in timings:
@@ -203,6 +207,7 @@ def persist_perf(run, session, svg_path):
session: The db transaction we belong to.
svg_path: The path to the SVG file we want to store.
"""
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils import schema as s
with open(svg_path, 'r') as svg_file:
@@ -235,6 +240,7 @@ def persist_config(run, session, cfg):
session: The db transaction we belong to.
cfg: The configuration we want to persist.
"""
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils import schema as s
for cfg_elem in cfg:
diff --git a/benchbuild/utils/log.py b/benchbuild/utils/log.py
index 056233241..03e6392cf 100644
--- a/benchbuild/utils/log.py
+++ b/benchbuild/utils/log.py
@@ -17,12 +17,6 @@ def __create_handler__() -> RichHandler:
)
-def configure_migrate_log():
- migrate_log = logging.getLogger("migrate.versioning")
- migrate_log.setLevel(logging.ERROR)
- migrate_log.propagate = True
-
-
def configure_plumbum_log():
plumbum_format = logging.Formatter('$> %(message)s')
handler = __create_handler__()
@@ -69,7 +63,6 @@ def configure():
root_logger.setLevel(log_levels[int(settings.CFG["verbosity"])])
configure_plumbum_log()
- configure_migrate_log()
configure_parse_log()
diff --git a/benchbuild/utils/requirements.py b/benchbuild/utils/requirements.py
index 2a35e26e2..3b62f2138 100644
--- a/benchbuild/utils/requirements.py
+++ b/benchbuild/utils/requirements.py
@@ -1,9 +1,9 @@
import abc
import copy
import logging
-import typing as tp
import math
import re
+import typing as tp
from enum import Enum
import attr
@@ -36,8 +36,9 @@ def to_cli_option(self) -> str:
@classmethod
@abc.abstractmethod
def merge_requirements(
- cls: tp.Type[RequirementSubType], lhs_option: RequirementSubType,
- rhs_option: RequirementSubType) -> RequirementSubType:
+ cls: tp.Type[RequirementSubType], lhs_option: RequirementSubType,
+ rhs_option: RequirementSubType
+ ) -> RequirementSubType:
"""
Merge the requirements of the same type together.
"""
@@ -94,8 +95,9 @@ def to_slurm_cli_opt(self) -> str:
@classmethod
def merge_requirements(
- cls, lhs_option: 'SlurmCoresPerSocket',
- rhs_option: 'SlurmCoresPerSocket') -> 'SlurmCoresPerSocket':
+ cls, lhs_option: 'SlurmCoresPerSocket',
+ rhs_option: 'SlurmCoresPerSocket'
+ ) -> 'SlurmCoresPerSocket':
"""
Merge the requirements of the same type together.
"""
@@ -117,8 +119,9 @@ def __repr__(self) -> str:
return "Exclusive"
@classmethod
- def merge_requirements(cls, lhs_option: 'SlurmExclusive',
- rhs_option: 'SlurmExclusive') -> 'SlurmExclusive':
+ def merge_requirements(
+ cls, lhs_option: 'SlurmExclusive', rhs_option: 'SlurmExclusive'
+ ) -> 'SlurmExclusive':
"""
Merge the requirements of the same type together.
"""
@@ -140,14 +143,17 @@ def to_slurm_cli_opt(self) -> str:
return f"--nice={self.niceness}"
@classmethod
- def merge_requirements(cls, lhs_option: 'SlurmNiceness',
- rhs_option: 'SlurmNiceness') -> 'SlurmNiceness':
+ def merge_requirements(
+ cls, lhs_option: 'SlurmNiceness', rhs_option: 'SlurmNiceness'
+ ) -> 'SlurmNiceness':
"""
Merge the requirements of the same type together.
"""
if lhs_option.niceness != rhs_option.niceness:
- LOG.info("Multiple different slurm niceness values specifcied, "
- "choosing the smaller value.")
+ LOG.info(
+ "Multiple different slurm niceness values specified, "
+ "choosing the smaller value."
+ )
return SlurmNiceness(min(lhs_option.niceness, rhs_option.niceness))
@@ -189,8 +195,9 @@ def __repr__(self) -> str:
return f"Hint ({str(self)})"
@classmethod
- def merge_requirements(cls, lhs_option: 'SlurmHint',
- rhs_option: 'SlurmHint') -> 'SlurmHint':
+ def merge_requirements(
+ cls, lhs_option: 'SlurmHint', rhs_option: 'SlurmHint'
+ ) -> 'SlurmHint':
"""
Merge the requirements of the same type together.
"""
@@ -199,7 +206,8 @@ def merge_requirements(cls, lhs_option: 'SlurmHint',
if not cls.__hints_not_mutually_exclusive(combined_hints):
raise ValueError(
- "Two mutally exclusive hints for slurm have be specified.")
+ "Two mutually exclusive hints for slurm have been specified."
+ )
return SlurmHint(combined_hints)
@@ -212,11 +220,15 @@ def __hints_not_mutually_exclusive(hints: tp.Set[SlurmHints]) -> bool:
Returns:
True, if no mutally exclusive hints are in the list
"""
- if (SlurmHint.SlurmHints.compute_bound in hints and
- SlurmHint.SlurmHints.memory_bound in hints):
+ if (
+ SlurmHint.SlurmHints.compute_bound in hints and
+ SlurmHint.SlurmHints.memory_bound in hints
+ ):
return False
- if (SlurmHint.SlurmHints.nomultithread in hints and
- SlurmHint.SlurmHints.multithread in hints):
+ if (
+ SlurmHint.SlurmHints.nomultithread in hints and
+ SlurmHint.SlurmHints.multithread in hints
+ ):
return False
return True
@@ -320,8 +332,9 @@ def to_slurm_cli_opt(self) -> str:
return f"--time={self.to_slurm_time_format()}"
@classmethod
- def merge_requirements(cls, lhs_option: 'SlurmTime',
- rhs_option: 'SlurmTime') -> 'SlurmTime':
+ def merge_requirements(
+ cls, lhs_option: 'SlurmTime', rhs_option: 'SlurmTime'
+ ) -> 'SlurmTime':
"""
Merge the requirements of the same type together.
"""
@@ -361,8 +374,7 @@ def _to_bytes(byte_str: str) -> int:
>>> _to_bytes("10G")
10737418240
"""
- match = _BYTE_RGX.search(byte_str)
- if match:
+ if (match := _BYTE_RGX.search(byte_str)):
size = int(match.group("size"))
byte_suffix = match.group("byte_suffix")
return size * _get_byte_size_factor(byte_suffix)
@@ -410,16 +422,18 @@ def to_slurm_cli_opt(self) -> str:
return f"--mem={byte_size_tuple[0]}{byte_size_tuple[1]}"
@classmethod
- def merge_requirements(cls, lhs_option: 'SlurmMem',
- rhs_option: 'SlurmMem') -> 'SlurmMem':
+ def merge_requirements(
+ cls, lhs_option: 'SlurmMem', rhs_option: 'SlurmMem'
+ ) -> 'SlurmMem':
"""
Merge the requirements of the same type together.
"""
return copy.deepcopy(max(lhs_option, rhs_option))
-def merge_slurm_options(list_1: tp.List[Requirement],
- list_2: tp.List[Requirement]) -> tp.List[Requirement]:
+def merge_slurm_options(
+ list_1: tp.List[Requirement], list_2: tp.List[Requirement]
+) -> tp.List[Requirement]:
"""
Merged two lists of SlurmOptions into one.
"""
@@ -430,7 +444,8 @@ def merge_slurm_options(list_1: tp.List[Requirement],
if key in merged_options:
current_opt = merged_options[key]
merged_options[key] = current_opt.merge_requirements(
- current_opt, opt)
+ current_opt, opt
+ )
else:
merged_options[key] = opt
diff --git a/benchbuild/utils/revision_ranges.py b/benchbuild/utils/revision_ranges.py
index 519d2faba..085f91629 100644
--- a/benchbuild/utils/revision_ranges.py
+++ b/benchbuild/utils/revision_ranges.py
@@ -7,12 +7,14 @@
import typing as tp
from enum import IntFlag
-import pygit2
from plumbum.machines import LocalCommand
from benchbuild.source import Git
from benchbuild.utils.cmd import git as local_git
+if tp.TYPE_CHECKING:
+ import pygit2
+
def _get_git_for_path(repo_path: str) -> LocalCommand:
"""
@@ -158,9 +160,9 @@ class CommitState(IntFlag):
def _find_blocked_commits(
- commit: pygit2.Commit, good: tp.List[pygit2.Commit],
- bad: tp.List[pygit2.Commit]
-) -> tp.List[pygit2.Commit]:
+ commit: 'pygit2.Commit', good: tp.List['pygit2.Commit'],
+ bad: tp.List['pygit2.Commit']
+) -> tp.List['pygit2.Commit']:
"""
Find all commits affected by a bad commit and not yet "fixed" by a
good commit. This is done by performing a backwards search starting
@@ -175,8 +177,8 @@ def _find_blocked_commits(
All transitive parents of commit that have an ancestor from bad
that is not fixed by some commit from good.
"""
- stack: tp.List[pygit2.Commit] = [commit]
- blocked: tp.Dict[pygit2.Commit, CommitState] = {}
+ stack: tp.List['pygit2.Commit'] = [commit]
+ blocked: tp.Dict['pygit2.Commit', CommitState] = {}
while stack:
current_commit = stack.pop()
@@ -239,6 +241,7 @@ def __init__(
self.__revision_list: tp.Optional[tp.List[str]] = None
def init_cache(self, repo_path: str) -> None:
+ import pygit2 # pylint: disable=import-outside-toplevel
self.__revision_list = []
repo = pygit2.Repository(repo_path)
git = _get_git_for_path(repo_path)
diff --git a/benchbuild/utils/run.py b/benchbuild/utils/run.py
index 6bb96c56f..c0781208c 100644
--- a/benchbuild/utils/run.py
+++ b/benchbuild/utils/run.py
@@ -5,6 +5,7 @@
import sys
import typing as t
from contextlib import contextmanager
+from typing import Protocol
import attr
from plumbum import TEE, local
@@ -13,11 +14,6 @@
from benchbuild import settings, signals
-if sys.version_info <= (3, 8):
- from typing_extensions import Protocol
-else:
- from typing import Protocol
-
CommandResult = t.Tuple[int, str, str]
@@ -66,6 +62,10 @@ def __begin(self, command: BaseCommand, project, experiment, group):
(run, session), where run is the generated run instance and
session the associated transaction for later use.
"""
+ if not CFG["db"]["enabled"]:
+ return
+
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils import schema as s
from benchbuild.utils.db import create_run
@@ -95,6 +95,10 @@ def __end(self, stdout, stderr):
stdout: The stdout we captured of the run.
stderr: The stderr we capture of the run.
"""
+ if not CFG["db"]["enabled"]:
+ return
+
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils.schema import RunLog
run_id = self.db_run.id
@@ -124,6 +128,10 @@ def __fail(self, retcode, stdout, stderr):
stdout: The stdout we captured of the run.
stderr: The stderr we capture of the run.
"""
+ if not CFG["db"]["enabled"]:
+ return
+
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils.schema import RunLog
run_id = self.db_run.id
@@ -157,8 +165,9 @@ def __attrs_post_init__(self):
)
signals.handlers.register(self.__fail, 15, "SIGTERM", "SIGTERM")
- run_id = self.db_run.id
- settings.CFG["db"]["run_id"] = run_id
+ if CFG["db"]["enabled"]:
+ run_id = self.db_run.id
+ settings.CFG["db"]["run_id"] = run_id
def add_payload(self, name, payload):
if self == payload:
@@ -214,7 +223,8 @@ def __call__(self, *args, expected_retcode=0, ri=None, **kwargs):
return self
def commit(self):
- self.session.commit()
+ if CFG["db"]["enabled"]:
+ self.session.commit()
def begin_run_group(project, experiment):
@@ -231,6 +241,7 @@ def begin_run_group(project, experiment):
``(group, session)`` where group is the created group in the
database and session is the database session this group lives in.
"""
+ # pylint: disable=import-outside-toplevel
from benchbuild.utils.db import create_run_group
group, session = create_run_group(project, experiment)
@@ -345,6 +356,7 @@ def with_env_recursive(cmd: BaseCommand, **envvars: str) -> BaseCommand:
Returns:
The updated command.
"""
+ # pylint: disable=import-outside-toplevel
from plumbum.commands.base import BoundCommand, BoundEnvCommand
if isinstance(cmd, BoundCommand):
cmd.cmd = with_env_recursive(cmd.cmd, **envvars)
diff --git a/benchbuild/utils/schema.py b/benchbuild/utils/schema.py
index 17ddd422d..91cd4fdef 100644
--- a/benchbuild/utils/schema.py
+++ b/benchbuild/utils/schema.py
@@ -27,7 +27,6 @@
import typing as tp
import uuid
-import migrate.versioning.api as migrate
import sqlalchemy as sa
from sqlalchemy import (
Column,
@@ -364,86 +363,6 @@ def needed_schema(connection, meta):
return True
-def get_version_data():
- """Retreive migration information."""
- connect_str = str(settings.CFG["db"]["connect_string"])
- repo_url = path.template_path("../db/")
- return (connect_str, repo_url)
-
-
-@exceptions(
- error_messages={
- sa.exc.ProgrammingError: (
- 'Could not enforce versioning. Are you allowed to modify '
- 'the database?'
- )
- }
-)
-def enforce_versioning(force=False):
- """Install versioning on the db."""
- connect_str, repo_url = get_version_data()
- LOG.debug("Your database uses an unversioned benchbuild schema.")
- if not force and not ui.ask(
- "Should I enforce version control on your schema?"
- ):
- LOG.error("User declined schema versioning.")
- return None
- repo_version = migrate.version(repo_url, url=connect_str)
- migrate.version_control(connect_str, repo_url, version=repo_version)
- return repo_version
-
-
-def setup_versioning():
- connect_str, repo_url = get_version_data()
- repo_version = migrate.version(repo_url, url=connect_str)
- db_version = None
- requires_versioning = False
- try:
- db_version = migrate.db_version(connect_str, repo_url)
- except migrate.exceptions.DatabaseNotControlledError:
- requires_versioning = True
-
- if requires_versioning:
- db_version = enforce_versioning()
-
- return (repo_version, db_version)
-
-
-@exceptions(
- error_messages={
- sa.exc.ProgrammingError:
- "Update failed."
- " Base schema version diverged from the expected structure."
- }
-)
-def maybe_update_db(repo_version, db_version):
- if db_version is None:
- return
- if db_version == repo_version:
- return
-
- LOG.warning(
- "Your database contains version '%s' of benchbuild's schema.",
- db_version
- )
- LOG.warning(
- "Benchbuild currently requires version '%s' to work correctly.",
- repo_version
- )
- if not ui.ask(
- "Should I attempt to update your schema to version '{0}'?".
- format(repo_version)
- ):
- LOG.error("User declined schema upgrade.")
- return
-
- connect_str = str(settings.CFG["db"]["connect_string"])
- repo_url = path.template_path("../db/")
- LOG.info("Upgrading to newest version...")
- migrate.upgrade(connect_str, repo_url)
- LOG.info("Complete.")
-
-
class SessionManager:
def connect_engine(self):
@@ -500,10 +419,6 @@ def __init__(self):
if needed_schema(self.connection, BASE.metadata):
LOG.debug("Initialized new db schema.")
- repo_version = enforce_versioning(force=True)
- else:
- repo_version, db_version = setup_versioning()
- maybe_update_db(repo_version, db_version)
def get(self):
return sessionmaker(bind=self.connection)
diff --git a/benchbuild/utils/settings.py b/benchbuild/utils/settings.py
index 8cf0f5411..23b38c24d 100644
--- a/benchbuild/utils/settings.py
+++ b/benchbuild/utils/settings.py
@@ -17,12 +17,12 @@
import typing as tp
import uuid
import warnings
+from importlib.metadata import version, PackageNotFoundError
import attr
import schema
import six
import yaml
-from pkg_resources import DistributionNotFound, get_distribution
from plumbum import LocalPath, local
import benchbuild.utils.user_interface as ui
@@ -37,8 +37,8 @@ def __getitem__(self: 'Indexable', key: str) -> 'Indexable':
try:
- __version__ = get_distribution("benchbuild").version
-except DistributionNotFound:
+ __version__ = version("benchbuild")
+except PackageNotFoundError:
__version__ = "unknown"
LOG.error("could not find version information.")
@@ -142,7 +142,7 @@ def is_yaml(cfg_file: str) -> bool:
return os.path.splitext(cfg_file)[1] in [".yml", ".yaml"]
-class ConfigLoader(yaml.SafeLoader):
+class ConfigLoader(yaml.CSafeLoader): # type: ignore
"""Avoid polluting yaml's namespace with our modifications."""
@@ -322,17 +322,16 @@ def init_from_env(self) -> None:
if 'default' in self.node:
env_var = self.__to_env_var__().upper()
- if self.has_value():
- env_val = self.node['value']
- else:
- env_val = self.node['default']
- env_val = os.getenv(env_var, to_yaml(env_val))
- try:
- self.node['value'] = yaml.load(
- str(env_val), Loader=ConfigLoader
- )
- except ValueError:
- self.node['value'] = env_val
+ if not self.has_value():
+ self.node['value'] = self.node['default']
+ env_val = os.getenv(env_var, None)
+ if env_val is not None:
+ try:
+ self.node['value'] = yaml.load(
+ str(env_val), Loader=ConfigLoader
+ )
+ except ValueError:
+ self.node['value'] = env_val
else:
if isinstance(self.node, dict):
for k in self.node:
diff --git a/benchbuild/utils/wrapping.py b/benchbuild/utils/wrapping.py
index 8342e0036..29d3c1d05 100644
--- a/benchbuild/utils/wrapping.py
+++ b/benchbuild/utils/wrapping.py
@@ -27,10 +27,8 @@
import sys
import typing as tp
from pathlib import Path
-from typing import TYPE_CHECKING
import dill
-import jinja2
import plumbum as pb
from plumbum import local
from plumbum.commands.base import BoundCommand
@@ -43,7 +41,15 @@
LOG = logging.getLogger(__name__)
+# Configure default settings for dill pickle/unpickle, globally
+dill.settings['ignore'] = True
+dill.settings['recurse'] = True
+dill.settings['protocol'] = -1
+dill.settings['byref'] = True
+
if tp.TYPE_CHECKING:
+ import jinja2
+
import benchbuild.project.Project # pylint: disable=unused-import
@@ -71,17 +77,8 @@ def strip_path_prefix(ipath: Path, prefix: Path) -> Path:
return ipath
-def unpickle(pickle_file: str) -> tp.Any:
- """Unpickle a python object from the given path."""
- pickle = None
- with open(pickle_file, "rb") as pickle_f:
- pickle = dill.load(pickle_f)
- if not pickle:
- LOG.error("Could not load python object from file")
- return pickle
-
-
-def __create_jinja_env() -> jinja2.Environment:
+def __create_jinja_env() -> 'jinja2.Environment':
+ import jinja2 # pylint: disable=import-outside-toplevel
return jinja2.Environment(
trim_blocks=True,
lstrip_blocks=True,
@@ -136,6 +133,10 @@ def wrap(
env = CFG["env"].value
+ collect_coverage = bool(CFG["coverage"]["collect"])
+ coverage_config = str(CFG["coverage"]["config"])
+ coverage_path = str(CFG["coverage"]["path"])
+
bin_path = list_to_path(env.get("PATH", []))
bin_path = list_to_path([bin_path, os.environ["PATH"]])
@@ -152,6 +153,9 @@ def wrap(
ld_library_path=str(bin_lib_path),
home=str(home),
python=python,
+ collect_coverage=collect_coverage,
+ coverage_config=coverage_config,
+ coverage_path=coverage_path
)
)
@@ -211,6 +215,9 @@ def wrap_dynamic(
project_file = persist(project, suffix=".project")
cfg_env = CFG["env"].value
+ collect_coverage = bool(CFG["coverage"]["collect"])
+ coverage_config = str(CFG["coverage"]["config"])
+ coverage_path = str(CFG["coverage"]["path"])
bin_path = list_to_path(cfg_env.get("PATH", []))
bin_path = list_to_path([bin_path, os.environ["PATH"]])
@@ -229,6 +236,9 @@ def wrap_dynamic(
home=str(home),
python=python,
name_filters=name_filters,
+ collect_coverage=collect_coverage,
+ coverage_config=coverage_config,
+ coverage_path=coverage_path
)
)
@@ -269,6 +279,10 @@ def wrap_cc(
project_file = persist(project, suffix=".project")
+ collect_coverage = bool(CFG["coverage"]["collect"])
+ coverage_config = str(CFG["coverage"]["config"])
+ coverage_path = str(CFG["coverage"]["path"])
+
with open(filepath, "w") as wrapper:
wrapper.write(
template.render(
@@ -276,6 +290,9 @@ def wrap_cc(
project_file=str(project_file),
python=python,
detect_project=detect_project,
+ collect_coverage=collect_coverage,
+ coverage_config=coverage_config,
+ coverage_path=coverage_path
)
)
diff --git a/doc-requirements.txt b/doc-requirements.txt
deleted file mode 100644
index d9f18a69b..000000000
--- a/doc-requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
--e git+https://github.com/simbuerg/mkapi#egg=mkapi
-mkdocs
-pheasant
-pymdown-extensions
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 000000000..d0c3cbf10
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/basics/configuration.md b/docs/basics/configuration.md
deleted file mode 100644
index e69de29bb..000000000
diff --git a/docs/concepts/environments.md b/docs/concepts/environments.md
deleted file mode 100644
index 57dbc4aa8..000000000
--- a/docs/concepts/environments.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Environment
-
-![mkapi](benchbuild.environments.domain.declarative|all)
diff --git a/docs/concepts/source.md b/docs/concepts/source.md
deleted file mode 100644
index a6cfd7cfb..000000000
--- a/docs/concepts/source.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Source
-
-TODO.
-
-## General
-
-![mkapi](benchbuild.source.base)
-
-## Versioning
-
-![mkapi](benchbuild.source.versions)
-
-## HTTP
-
-![mkapi](benchbuild.source.http.HTTP)
-
-## Git
-
-![mkapi](benchbuild.source.git.Git)
-![mkapi](benchbuild.source.git.GitSubmodule)
-
-
-## Rsync
-
-TODO.
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 000000000..747ffb7b3
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.https://www.sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 000000000..55763240c
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,9 @@
+linkify
+mkdocs
+myst-parser[linkify]
+pymdown-extensions
+sphinx
+sphinx-autodoc-typehints
+sphinx-markdown
+sphinx-press-theme
+sphinxcontrib-programoutput
diff --git a/docs/CHANGELOG.md b/docs/source/CHANGELOG.md
similarity index 81%
rename from docs/CHANGELOG.md
rename to docs/source/CHANGELOG.md
index b300a604d..8d9f5f010 100644
--- a/docs/CHANGELOG.md
+++ b/docs/source/CHANGELOG.md
@@ -1,3 +1,46 @@
+
+## 6.7 (2023-04-04)
+
+
+#### Features
+
+* run auto-walrus over all of benchbuild's file ([79ac33d8](https://github.com/PolyJIT/benchbuild/commit/79ac33d8d69974fb6a1d049f75c5f013e444b2ce))
+* add support for auto-walrus as pre-commit hook ([d7a2165b](https://github.com/PolyJIT/benchbuild/commit/d7a2165bb22467754fd329fedc0c0f8330336e3f))
+* drop support for python 3.7 and 3.8 ([90308f2a](https://github.com/PolyJIT/benchbuild/commit/90308f2ae141fd4443f08ef55a4155e433fad929))
+* **ci:**
+ * update setup-python to v4 ([3e943df6](https://github.com/PolyJIT/benchbuild/commit/3e943df657cb06a4f2b47104a4e3dcb77f16793c))
+ * update github setup actions to v3 ([dfa4cb81](https://github.com/PolyJIT/benchbuild/commit/dfa4cb8135d3c0256c6b99dbe40771d8ce1e5a5d))
+* **command:** use name as default value for a command's label ([07f74dd4](https://github.com/PolyJIT/benchbuild/commit/07f74dd4932eecdfbd902d231242a65984501007))
+* **environments:** force base image to alpine:3.17 ([fe5d6155](https://github.com/PolyJIT/benchbuild/commit/fe5d615574130260e1af1aaa49b1058600ed9668))
+* **setup:**
+ * widen allowed versions to major versions ([5d29079a](https://github.com/PolyJIT/benchbuild/commit/5d29079a973dc1d8650ee9083090eaa7bd99cbfc))
+ * unlock latest versions of all packages ([7b5a704f](https://github.com/PolyJIT/benchbuild/commit/7b5a704f843872c90fa2a23eb738db4262883076))
+* **wrapping:** enforce global defaults for dill module ([489e3039](https://github.com/PolyJIT/benchbuild/commit/489e3039f8bb1ccee40219d1c27ca999cd8a3623))
+
+#### Bug Fixes
+
+* python version for setup-python@v4 has to be string ([7a1db742](https://github.com/PolyJIT/benchbuild/commit/7a1db74237e6b35687213920bb534b5308773615))
+* remove python 3.7 and 3.8 from all workflows ([aaabc1b5](https://github.com/PolyJIT/benchbuild/commit/aaabc1b5e7a817304f9e948feb565f37916c68bb))
+* bump pathos & dill to unbreak gitlab ci ([bce45d8a](https://github.com/PolyJIT/benchbuild/commit/bce45d8a148e41914d6f56a18933f9faf26f58bf))
+* **ci:**
+ * disable mkdocs in github ci ([8540f880](https://github.com/PolyJIT/benchbuild/commit/8540f880607ddeae293fc6b9feb1f751fd9cc721))
+ * reorder CI steps (test) ([74379d53](https://github.com/PolyJIT/benchbuild/commit/74379d5350c35cc2c76600ae7784b78aefd1a7db))
+  * increase verbosity to max for all integration tasks ([b6625d31](https://github.com/PolyJIT/benchbuild/commit/b6625d31d444e2ea51dc923a80438f0eed4a962d))
+* **command:** use private label when fetching string representation ([d83aa666](https://github.com/PolyJIT/benchbuild/commit/d83aa6661d3e2e08a0849bd3b23f557bd086e5b6))
+* **commands:** preserve workload order when filtering ([3648dd5e](https://github.com/PolyJIT/benchbuild/commit/3648dd5e8d0c20a07141a05a94a4e1223f575399))
+* **setup:** unlock any major version of pygit2 ([b09d9248](https://github.com/PolyJIT/benchbuild/commit/b09d92489a57897d4e0ad39dcb8b99ce08133d36))
+* **wrapping:** remove unused code ([0d1c890d](https://github.com/PolyJIT/benchbuild/commit/0d1c890db0f674aab13b12432394699030114087))
+
+
+
+# Changelog
+
+
+## 6.6.4 (2023-03-16)
+
+
+
+
## 6.6.3 (2023-03-06)
diff --git a/docs/index.md b/docs/source/about.md
similarity index 100%
rename from docs/index.md
rename to docs/source/about.md
diff --git a/docs/advanced/cli.md b/docs/source/advanced/cli.md
similarity index 100%
rename from docs/advanced/cli.md
rename to docs/source/advanced/cli.md
diff --git a/docs/advanced/index.md b/docs/source/advanced/index.md
similarity index 100%
rename from docs/advanced/index.md
rename to docs/source/advanced/index.md
diff --git a/docs/basics/actions.md b/docs/source/basics/actions.md
similarity index 95%
rename from docs/basics/actions.md
rename to docs/source/basics/actions.md
index d01ba6c20..48b86fcfb 100644
--- a/docs/basics/actions.md
+++ b/docs/source/basics/actions.md
@@ -92,3 +92,10 @@ build directory.
The match is done exact and matches agains the ``source.versions()`` output of a
source. Only sources that are marked as expandable (``source.is_expandable``)
will be checked.
+
+```{eval-rst}
+.. automodule:: benchbuild.utils.actions
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
diff --git a/docs/source/basics/configuration.md b/docs/source/basics/configuration.md
new file mode 100644
index 000000000..213513f12
--- /dev/null
+++ b/docs/source/basics/configuration.md
@@ -0,0 +1,19 @@
+# Configure
+
+## Module: settings
+
+```{eval-rst}
+.. automodule:: benchbuild.settings
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
+
+## Module: utils.settings
+
+```{eval-rst}
+.. automodule:: benchbuild.utils.settings
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
diff --git a/docs/basics/containers.md b/docs/source/basics/containers.md
similarity index 85%
rename from docs/basics/containers.md
rename to docs/source/basics/containers.md
index 81e9bd443..802b5a122 100644
--- a/docs/basics/containers.md
+++ b/docs/source/basics/containers.md
@@ -1,3 +1,5 @@
+# Containers
+
Benchbuild allows the definition of container images to define the base system
all experiment runs run in for a given project.
@@ -103,3 +105,39 @@ Please refer to podman's documentation on how to setup podman properly on your
system.
Podman is supported up to version 2.2.1
+
+## Module: benchbuild.container
+
+```{eval-rst}
+.. automodule:: benchbuild.container
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
+
+## Module: benchbuild.environments.domain.declarative
+
+```{eval-rst}
+.. automodule:: benchbuild.environments.domain.declarative
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
+
+## Module: benchbuild.environments.domain.model
+
+```{eval-rst}
+.. automodule:: benchbuild.environments.domain.model
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
+
+## Module: benchbuild.environments.domain.commands
+
+```{eval-rst}
+.. automodule:: benchbuild.environments.domain.commands
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
diff --git a/docs/basics/index.md b/docs/source/basics/index.md
similarity index 100%
rename from docs/basics/index.md
rename to docs/source/basics/index.md
diff --git a/docs/concepts/command.md b/docs/source/concepts/command.md
similarity index 92%
rename from docs/concepts/command.md
rename to docs/source/concepts/command.md
index 63574e783..87fa17a2a 100644
--- a/docs/concepts/command.md
+++ b/docs/source/concepts/command.md
@@ -48,3 +48,12 @@ example of an available token is the above ``SourceRoot``.
BenchBuild offers project authors a way to tokenize path components for
commands. These can be used to refer to a project's root directory or
source directory in a generic way.
+
+## Module: command
+
+```{eval-rst}
+.. automodule:: benchbuild.command
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
diff --git a/docs/source/concepts/environments.md b/docs/source/concepts/environments.md
new file mode 100644
index 000000000..fa34659f8
--- /dev/null
+++ b/docs/source/concepts/environments.md
@@ -0,0 +1 @@
+# Environment
diff --git a/docs/concepts/experiments.md b/docs/source/concepts/experiments.md
similarity index 100%
rename from docs/concepts/experiments.md
rename to docs/source/concepts/experiments.md
diff --git a/docs/concepts/projects.md b/docs/source/concepts/projects.md
similarity index 100%
rename from docs/concepts/projects.md
rename to docs/source/concepts/projects.md
diff --git a/docs/source/concepts/source.md b/docs/source/concepts/source.md
new file mode 100644
index 000000000..edf9a57e5
--- /dev/null
+++ b/docs/source/concepts/source.md
@@ -0,0 +1,46 @@
+# Source
+
+## Base
+
+```{eval-rst}
+.. automodule:: benchbuild.source.base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
+
+## Git
+
+```{eval-rst}
+.. automodule:: benchbuild.source.git
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
+
+## HTTP
+
+```{eval-rst}
+.. automodule:: benchbuild.source.http
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
+
+## RSync
+
+```{eval-rst}
+.. automodule:: benchbuild.source.rsync
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
+
+## Module: source
+
+```{eval-rst}
+.. automodule:: benchbuild.source
+ :members:
+ :undoc-members:
+ :show-inheritance:
+```
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 000000000..26382866f
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,62 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+import logging
+import os
+
+from pkg_resources import DistributionNotFound, get_distribution
+
+import benchbuild.utils
+
+# pylint: skip-file
+try:
+ __version__ = get_distribution("benchbuild").version
+except DistributionNotFound:
+ pass
+
+project = 'BenchBuild'
+copyright = '2023, Andreas Simbürger'
+author = 'Andreas Simbürger'
+release = __version__
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+extensions = [
+ 'myst_parser',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.autodoc',
+ 'sphinx_autodoc_typehints',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.viewcode',
+ 'sphinx.ext.autosectionlabel',
+ 'sphinxcontrib.programoutput',
+ 'sphinx.ext.githubpages',
+]
+
+exclude_patterns = []
+
+source_suffix = {
+ '.rst': 'restructuredtext',
+ '.md': 'markdown',
+}
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_theme = 'press'
+pygments_style = 'monokai'
+html_static_path = ['_static']
+
+napoleon_google_docstring = True
+napoleon_use_admonition_for_examples = True
+
+# Configure MyST Parser
+#myst_gfm_only = True
+myst_enable_extensions = ["linkify"]
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 000000000..9fcef7096
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,31 @@
+.. benchbuild documentation master file, created by
+ sphinx-quickstart on Tue Mar 28 01:42:59 2023.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to benchbuild's documentation!
+======================================
+
+.. toctree::
+ :caption: Contents:
+
+ about.md
+ basics/index.md
+ basics/configuration.md
+ basics/containers.md
+ basics/actions.md
+ concepts/command.md
+ concepts/source.md
+ concepts/environments.md
+ concepts/projects.md
+ concepts/experiments.md
+ advanced/index.md
+ advanced/cli.md
+ CHANGELOG.md
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/mkdocs.yml b/mkdocs.yml
deleted file mode 100644
index 3a96b7d5b..000000000
--- a/mkdocs.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-site_name: BenchBuild
-site_description: Empirical-Research Toolkit
-site_author: simbuerg
-site_url: https://polyjit.github.io/benchbuild
-repo_url: https://github.com/PolyJIT/benchbuild
-edit_uri: ""
-
-nav:
- - index.md
- - Getting Started:
- - basics/index.md
- - basics/configuration.md
- - basics/containers.md
- - basics/actions.md
- - Concepts:
- - concepts/source.md
- - concepts/environments.md
- - concepts/projects.md
- - concepts/experiments.md
- - Advanced Usage:
- - advanced/index.md
- - advanced/cli.md
- #- API: mkapi/api/benchbuild
- - About:
- - Release Notes: CHANGELOG.md
-
-plugins:
- - search
- - pheasant
- #- mkapi
-
-markdown_extensions:
- - footnotes
- - pymdownx.details
- - pymdownx.highlight
- - pymdownx.superfences
- - admonition
-
-theme:
- name: readthedocs
- highlightjs: true
- hljs_style: github
- hljs_languages:
- - bash
- - json
- - python
- - yaml
diff --git a/requirements.txt b/requirements.txt
index dd481b5d5..224e9a460 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,19 +1,18 @@
-attrs~=22.2
-dill==0.3.4
-Jinja2~=3.1
-parse~=1.19
-pathos~=0.2
-plumbum~=1.8
-psutil~=5.9
-psycopg2-binary~=2.9
-pygit2>=1.2
-pygtrie~=2.5
-pyparsing~=3.0
-PyYAML~=6.0
-result~=0.9.0
-rich~=12.5
-schema~=0.7.5
-SQLAlchemy~=1.4.46
-sqlalchemy-migrate~=0.13
-typing-extensions~=4.3.0
-virtualenv~=20.20
+attrs>=22
+dill>=0
+Jinja2>=3
+parse>=1
+pathos>=0
+plumbum>=1
+psutil>=5
+psycopg2-binary>=2
+pygit2>=1
+pygtrie>=2
+pyparsing>=3
+PyYAML>=6.0
+result>=0
+rich>=12
+schema>=0
+SQLAlchemy>=2
+typing-extensions>=4
+virtualenv>=20
diff --git a/setup.py b/setup.py
index dc5245f07..6076d849a 100644
--- a/setup.py
+++ b/setup.py
@@ -30,13 +30,11 @@
include_package_data=True,
setup_requires=["pytest-runner", "setuptools_scm"],
install_requires=[
- "Jinja2>=2.10,<4.0", "PyYAML>=5.1,<7.0", "attrs>=19.3,<23.0",
- "dill==0.3.4", "pathos~=0.2", "parse~=1.14", "plumbum~=1.6",
- "psutil~=5.6", "psycopg2-binary~=2.8", "pygit2>=1.2.1,<1.11.0",
- "pygtrie~=2.3", "pyparsing>=2.4,<4.0", "rich>=6.1,<13.0",
- "SQLAlchemy~=1.4.46", "sqlalchemy-migrate~=0.13",
- "typing-extensions>=3.7.4.3,<4.3.1.0", "virtualenv>=16.7,<21.0",
- "schema~=0.7.4", "result>=0.8,<0.10"
+ "Jinja2>=3", "PyYAML>=6", "attrs>=22", "dill>=0", "pathos>=0.3",
+ "parse>=1", "plumbum>=1", "psutil>=5", "psycopg2-binary>=2",
+ "pygit2>=1", "pygtrie>=2", "pyparsing>=3", "rich>=13",
+ "SQLAlchemy>=2", "typing-extensions>=4", "virtualenv>=20",
+ "schema>=0", "result>=0"
],
author="Andreas Simbuerger",
author_email="simbuerg@fim.uni-passau.de",