Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ENH: Switch to TOML and use Pydantic for validation #1432

Draft
wants to merge 3 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 66 additions & 0 deletions asv.conf.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
# The version of the config file format. Do not change, unless
# you know what you are doing.
version = 1

# The name of the project being benchmarked
project = "asv"

# The project's homepage
project_url = "https://github.com/airspeed-velocity/asv/"

# The URL or local path of the source code repository for the
# project being benchmarked
repo = "."

# List of branches to benchmark. If not provided, defaults to "main"
# (for git) or "default" (for mercurial).
branches = ["main"] # for git

# The DVCS being used. If not set, it will be automatically
# determined from "repo" by looking at the protocol in the URL
# (if remote), or by looking for special directories, such as
# ".git" (if local).
dvcs = "git"

# The tool to use to create environments. May be "conda",
# "virtualenv", "mamba" (above 3.8)
# or other value depending on the plugins in use.
# If missing or the empty string, the tool will be automatically
# determined by looking for tools on the PATH environment
# variable.
environment_type = "virtualenv"

# The base URL to show a commit for the project.
show_commit_url = "http://github.com/airspeed-velocity/asv/commit/"

# The Pythons you'd like to test against. If not provided, defaults
# to the current version of Python used to run `asv`.
pythons = ["3.12"]

# The directory (relative to the current directory) that benchmarks are stored in.
# If not provided, defaults to "benchmarks"
# benchmark_dir = "benchmarks"

# The directory (relative to the current directory) to cache the Python
# environments in. If not provided, defaults to "env"
env_dir = ".asv/env"

# The directory (relative to the current directory) that raw benchmark
# results are stored in. If not provided, defaults to "results".
results_dir = ".asv/results"

# The directory (relative to the current directory) that the html tree
# should be written to. If not provided, defaults to "html".
html_dir = ".asv/html"

# The matrix of dependencies to test. Each key is the name of a
# package (in PyPI) and the values are version numbers. An empty
# list or empty string indicates to just test against the default
# (latest) version. null indicates that the package is to not be
# installed. If the package to be tested is only available from
# PyPI, and the 'environment_type' is conda, then you can preface
# the package name by 'pip+', and the package will be installed via
# pip (with all the conda available packages installed first,
# followed by the pip installed packages).
[matrix.req]
six = []
162 changes: 162 additions & 0 deletions asv/schema.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,162 @@
from asv import util
import rtoml
import json

from pathlib import Path

from pydantic import (
BaseModel,
Field,
HttpUrl,
AnyUrl,
ConfigDict,
field_serializer,
)
from typing import Optional, Any, Union
from typing_extensions import Literal


class ASVConfig(BaseModel):
    """Schema for the ``asv`` configuration file.

    Mirrors the keys historically accepted in ``asv.conf.json`` so existing
    configurations validate unchanged; field docstrings double as the
    rendered reference documentation via ``autodoc_pydantic``.
    """

    # use_attribute_docstrings=True makes the bare docstrings written
    # directly below fields (``project_url``, ``repo``, ``repo_subdir``)
    # become the fields' descriptions (pydantic >= 2.7). With False those
    # docstrings are silently ignored and the generated docs lose them.
    model_config = ConfigDict(use_attribute_docstrings=True)

    @field_serializer("project_url", "repo")
    def serialize_urls(self, _url: Optional[HttpUrl]):
        # Url/Path objects are not JSON/TOML-native; dump them as plain strings.
        return str(_url)

    project: str = Field(..., description="The name of the project being benchmarked.")
    project_url: Optional[HttpUrl] = Field(None)
    """
    The URL to the homepage of the project.

    This can point to anywhere, really, as it's only used for the link at the
    top of the benchmark results page back to your project.
    """

    repo: Union[AnyUrl, Path] = Field(...)
    """
    The URL to the repository for the project.

    The value can also be a path, relative to the location of the
    configuration file. For example, if the benchmarks are stored in the
    same repository as the project itself, and the configuration file is
    located at ``benchmarks/asv.conf.json`` inside the repository, you can
    set ``"repo": ".."`` to use the local repository.

    Currently, only ``git`` and ``hg`` repositories are supported, so this must be
    a URL that ``git`` or ``hg`` know how to clone from, for example::

        - [email protected]:airspeed-velocity/asv.git
        - https://github.com/airspeed-velocity/asv.git
        - ssh://[email protected]/yt_analysis/yt
        - hg+https://bitbucket.org/yt_analysis/yt

    The repository may be readonly.
    """
    repo_subdir: Optional[str] = Field(None)
    """
    The relative path to your Python project inside the repository. This is
    where its ``setup.py`` file is located.

    If empty or omitted, the project is assumed to be located at the root of
    the repository.
    """

    build_command: Optional[list[str]] = Field(
        default=[
            "python setup.py build",
            "python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}",
        ],
        description="Commands to rebuild the project.",
    )
    install_command: Optional[list[str]] = Field(
        default=["in-dir={env_dir} python -mpip install {wheel_file}"],
        description="Command to install the project.",
    )
    uninstall_command: Optional[list[str]] = Field(
        default=["return-code=any python -mpip uninstall -y {project}"],
        description="Command to uninstall the project.",
    )

    branches: Optional[list[str]] = Field(
        None, description="List of branches to benchmark."
    )
    dvcs: Optional[str] = Field(None, description="The DVCS being used (e.g., 'git').")
    environment_type: Optional[str] = Field(
        None, description="The tool to use to create environments (e.g., 'virtualenv')."
    )
    install_timeout: Optional[int] = Field(
        600, description="Timeout in seconds for installing dependencies."
    )
    show_commit_url: Optional[str] = Field(
        None, description="Base URL to show a commit for the project."
    )
    pythons: Optional[list[str]] = Field(
        None, description="List of Python versions to test against."
    )
    conda_channels: Optional[list[str]] = Field(
        None, description="List of conda channels for dependency packages."
    )
    conda_environment_file: Optional[str] = Field(
        None, description="A conda environment file for environment creation."
    )

    # Values are e.g. {"req": {"numpy": ["1.6", "1.7"], "six": ["", None]}};
    # an inner None means "do not install this package".
    matrix: Optional[dict[str, dict[str, Union[list[Optional[str]], None]]]] = Field(
        None,
        description="Matrix of dependencies and environment variables to test.",
    )
    exclude: Optional[list[dict[str, Union[str, dict[str, Optional[str]]]]]] = Field(
        None,
        description="Combinations of libraries/python versions to exclude from testing.",
    )
    include: Optional[list[dict[str, Union[str, dict[str, Optional[str]]]]]] = Field(
        None,
        description="Combinations of libraries/python versions to include for testing.",
    )

    benchmark_dir: Optional[str] = Field(
        None, description="Directory where benchmarks are stored."
    )
    env_dir: Optional[str] = Field(
        None, description="Directory to cache Python environments."
    )
    results_dir: Optional[str] = Field(
        None, description="Directory where raw benchmark results are stored."
    )
    html_dir: Optional[str] = Field(
        None, description="Directory where the HTML tree is written."
    )
    hash_length: Optional[int] = Field(
        8, description="Number of characters to retain in commit hashes."
    )
    build_cache_size: Optional[int] = Field(
        2, description="Number of builds to cache per environment."
    )
    regressions_first_commits: Optional[dict[str, Optional[str]]] = Field(
        None, description="Commits after which regression search starts."
    )
    regressions_thresholds: Optional[dict[str, float]] = Field(
        None,
        description="Thresholds for relative change in results to report regressions.",
    )


def _demo() -> None:
    """Manual smoke test: build a sample config and round-trip one from disk.

    Not a unit test — it reads a sample ``asv.conf.json`` relative to the
    current working directory and prints the re-serialized result.
    """
    # Example usage: exercises nested matrix validation, including the
    # ``None`` ("do not install") and ``""`` ("latest version") markers.
    config = ASVConfig(
        project="MyProject",
        project_url="https://example.com",
        repo="https://github.com/example/repo",
        matrix={
            "req": {"numpy": ["1.6", "1.7"], "six": ["", None], "pip+emcee": [""]},
            "env": {"ENV_VAR_1": ["val1", "val2"]},
            "env_nobuild": {"ENV_VAR_2": ["val3", None]},
        },
    )
    # Using model_dump with mode='json' to ensure proper serialization
    # print(rtoml.dumps(config.model_dump(mode="toml")))
    # print(json.dumps(config.model_dump(mode="toml"), indent=4))

    # read_bytes() closes the file handle, unlike open("rb").read().
    mkconf = ASVConfig.model_validate_json(
        Path("../asv_samples/asv.conf.json").read_bytes()
    )
    # exclude_defaults=True prevents "fat" outputs full of default values.
    print(json.dumps(mkconf.model_dump(mode="toml", exclude_defaults=True), indent=4))


if __name__ == "__main__":
    # Guarded so that importing ``asv.schema`` (e.g. during the docs build)
    # performs no file I/O and cannot fail when the sample config is absent.
    _demo()
4 changes: 4 additions & 0 deletions docs/source/asv.conf.json.rst
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
.. _conf-reference:


.. autopydantic_model:: asv.schema.ASVConfig


``asv.conf.json`` reference
===========================

Expand Down
3 changes: 2 additions & 1 deletion docs/source/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,13 @@
"sphinxcontrib.bibtex",
"sphinx_collapse",
"autoapi.extension",
"sphinxcontrib.autodoc_pydantic",
]

autoapi_dirs = ["../../asv"]
autoapi_add_toc_entry = True
autoapi_keep_files = True
autoapi_ignore = ["*_version*", "*migrations*"]
autoapi_ignore = ["*_version*", "*migrations*", "*schema*"]
autoapi_options = [
"members",
"undoc-members",
Expand Down
2 changes: 2 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ dependencies = [
"tabulate",
"virtualenv",
"packaging",
"pydantic",
"importlib-metadata",
"tomli; python_version < '3.11'",
"colorama; platform_system == 'Windows'",
Expand Down Expand Up @@ -69,6 +70,7 @@ doc = [
"sphinxcontrib.bibtex",
"setuptools", # dependency from bibtex
"sphinxcontrib.katex",
"autodoc-pydantic",
"furo",
]
dev = [
Expand Down
Loading