From bedac800a3fe437a3e1a475c3f1e864eac8c436c Mon Sep 17 00:00:00 2001 From: Tomas Dvorak Date: Wed, 19 Feb 2025 16:00:17 +0100 Subject: [PATCH] feat: init python package Signed-off-by: Tomas Dvorak --- python/.embedmeignore | 1 + python/.env.example | 19 + python/.gitignore | 166 + python/CONTRIBUTING.md | 223 ++ python/LICENSE | 201 + python/README.md | 142 + python/SECURITY.md | 15 + python/beeai_framework/__init__.py | 42 + python/beeai_framework/adapters/__init__.py | 1 + .../beeai_framework/adapters/litellm/chat.py | 158 + .../adapters/ollama/__init__.py | 1 + .../adapters/ollama/backend/__init__.py | 1 + .../adapters/ollama/backend/chat.py | 19 + .../adapters/watsonx/__init__.py | 1 + .../adapters/watsonx/backend/__init__.py | 1 + .../adapters/watsonx/backend/chat.py | 19 + python/beeai_framework/agents/__init__.py | 6 + python/beeai_framework/agents/base.py | 60 + python/beeai_framework/agents/bee/__init__.py | 5 + python/beeai_framework/agents/bee/agent.py | 165 + python/beeai_framework/agents/errors.py | 10 + python/beeai_framework/agents/runners/base.py | 130 + .../agents/runners/default/prompts.py | 138 + .../agents/runners/default/runner.py | 171 + .../agents/runners/granite/prompts.py | 80 + .../agents/runners/granite/runner.py | 53 + python/beeai_framework/agents/types.py | 75 + python/beeai_framework/backend/__init__.py | 25 + python/beeai_framework/backend/chat.py | 266 ++ python/beeai_framework/backend/constants.py | 28 + python/beeai_framework/backend/errors.py | 25 + python/beeai_framework/backend/message.py | 141 + python/beeai_framework/backend/utils.py | 66 + python/beeai_framework/cancellation.py | 76 + python/beeai_framework/context.py | 160 + python/beeai_framework/emitter/__init__.py | 27 + python/beeai_framework/emitter/emitter.py | 192 + python/beeai_framework/emitter/errors.py | 10 + python/beeai_framework/emitter/types.py | 24 + python/beeai_framework/emitter/utils.py | 17 + python/beeai_framework/errors.py | 179 + python/beeai_framework/llms/__init__.py | 13 + python/beeai_framework/llms/base_output.py | 27 + python/beeai_framework/llms/llm.py | 133 + python/beeai_framework/llms/output.py | 29 + python/beeai_framework/memory/__init__.py | 34 + python/beeai_framework/memory/base_cache.py | 109 + python/beeai_framework/memory/base_memory.py | 83 + python/beeai_framework/memory/errors.py | 32 + python/beeai_framework/memory/file_cache.py | 238 ++ .../beeai_framework/memory/readonly_memory.py | 34 + python/beeai_framework/memory/serializable.py | 97 + python/beeai_framework/memory/serializer.py | 254 ++ .../beeai_framework/memory/sliding_cache.py | 111 + .../beeai_framework/memory/sliding_memory.py | 129 + .../memory/summarize_memory.py | 76 + python/beeai_framework/memory/task_map.py | 144 + python/beeai_framework/memory/token_memory.py | 123 + .../memory/unconstrained_cache.py | 161 + .../memory/unconstrained_memory.py | 37 + python/beeai_framework/parsers/line_prefix.py | 41 + python/beeai_framework/tools/__init__.py | 18 + python/beeai_framework/tools/errors.py | 13 + python/beeai_framework/tools/mcp_tools.py | 80 + .../beeai_framework/tools/search/__init__.py | 6 + python/beeai_framework/tools/search/base.py | 28 + .../tools/search/duckduckgo.py | 56 + .../beeai_framework/tools/search/wikipedia.py | 15 + python/beeai_framework/tools/tool.py | 133 + .../tools/weather/openmeteo.py | 124 + python/beeai_framework/utils/__init__.py | 8 + python/beeai_framework/utils/_types.py | 7 + python/beeai_framework/utils/config.py | 18 + 
python/beeai_framework/utils/counter.py | 23 + python/beeai_framework/utils/custom_logger.py | 107 + python/beeai_framework/utils/errors.py | 17 + python/beeai_framework/utils/events.py | 11 + python/beeai_framework/utils/models.py | 20 + python/beeai_framework/utils/regex.py | 11 + python/beeai_framework/utils/templates.py | 41 + python/beeai_framework/workflows/__init__.py | 4 + python/beeai_framework/workflows/agent.py | 96 + python/beeai_framework/workflows/errors.py | 8 + python/beeai_framework/workflows/workflow.py | 145 + python/cz_commitizen/__init__.py | 17 + python/cz_commitizen/monorepo_commits.py | 73 + python/docs/CODE_OF_CONDUCT.md | 53 + python/docs/CONTRIBUTING.md | 223 ++ python/docs/README.md | 142 + python/docs/SECURITY.md | 15 + python/docs/_sidebar.md | 25 + python/docs/agents.md | 94 + python/docs/assets/Bee_Dark.svg | 16 + python/docs/backend.md | 109 + python/docs/cache.md | 102 + python/docs/emitter.md | 57 + python/docs/errors.md | 61 + python/docs/examples.md | 145 + python/docs/index.html | 97 + python/docs/instrumentation.md | 74 + python/docs/integrations.md | 13 + python/docs/logger.md | 27 + python/docs/memory.md | 379 ++ python/docs/overview.md | 67 + python/docs/searxng-tool.md | 5 + python/docs/serialization.md | 69 + python/docs/sql-tool.md | 4 + python/docs/templates.md | 152 + python/docs/tools.md | 346 ++ python/docs/tutorials.md | 3 + python/docs/version.md | 20 + python/docs/workflows.md | 372 ++ python/examples/README.md | 145 + python/examples/agents/README.md | 7 + python/examples/agents/bee.py | 162 + python/examples/agents/bee_advanced.py | 195 + python/examples/agents/granite.py | 41 + python/examples/agents/memory.py | 24 + python/examples/agents/requirements.txt | 2 + python/examples/agents/simple.py | 20 + python/examples/backend/providers/ollama.py | 108 + python/examples/backend/providers/watsonx.py | 84 + python/examples/basic.py | 22 + python/examples/helpers/io.py | 19 + python/examples/llms.py | 51 + python/examples/memory/agentMemory.py | 79 + python/examples/memory/slidingMemory.py | 42 + python/examples/memory/summarizeMemory.py | 46 + python/examples/memory/tokenMemory.py | 63 + python/examples/memory/unconstrainedMemory.py | 33 + python/examples/notebooks/README.md | 36 + python/examples/notebooks/agents.ipynb | 283 ++ python/examples/notebooks/basics.ipynb | 350 ++ python/examples/notebooks/requirements.txt | 1 + python/examples/notebooks/searXNG.md | 52 + python/examples/notebooks/workflows.ipynb | 402 ++ python/examples/requirements.txt | 2 + python/examples/runners/basic_runner.py | 86 + python/examples/templates/agent_sys_prompt.py | 17 + python/examples/templates/basic_functions.py | 26 + python/examples/templates/basic_template.py | 18 + python/examples/tools/decorator.py | 59 + python/examples/tools/duckduckgo.py | 22 + python/examples/tools/openmeteo.py | 22 + python/examples/version.py | 1 + python/examples/workflows/advanced.py | 61 + python/examples/workflows/memory.py | 44 + python/examples/workflows/multi_agents.py | 60 + python/examples/workflows/simple.py | 29 + python/examples/workflows/web_agent.py | 92 + python/poetry.lock | 3295 +++++++++++++++++ python/pyproject.toml | 267 ++ python/scripts/copyright.sh | 55 + python/tests/__init__.py | 0 python/tests/backend/test_chatmodel.py | 127 + python/tests/backend/test_message.py | 95 + python/tests/conftest.py | 8 + python/tests/errors_test.py | 163 + python/tests/runners/test_default_runner.py | 45 + python/tests/templates/test_templates.py | 62 + 
python/tests/tools/test_duckduckgo.py | 26 + python/tests/tools/test_mcp_tool.py | 133 + python/tests/tools/test_opemmeteo.py | 57 + python/tests/utils/test_custom_logger.py | 19 + python/tests/workflows/multi_agents.py | 50 + python/tests/workflows/test_workflow.py | 139 + 166 files changed, 16140 insertions(+) create mode 100644 python/.embedmeignore create mode 100644 python/.env.example create mode 100644 python/.gitignore create mode 100644 python/CONTRIBUTING.md create mode 100644 python/LICENSE create mode 100644 python/README.md create mode 100644 python/SECURITY.md create mode 100644 python/beeai_framework/__init__.py create mode 100644 python/beeai_framework/adapters/__init__.py create mode 100644 python/beeai_framework/adapters/litellm/chat.py create mode 100644 python/beeai_framework/adapters/ollama/__init__.py create mode 100644 python/beeai_framework/adapters/ollama/backend/__init__.py create mode 100644 python/beeai_framework/adapters/ollama/backend/chat.py create mode 100644 python/beeai_framework/adapters/watsonx/__init__.py create mode 100644 python/beeai_framework/adapters/watsonx/backend/__init__.py create mode 100644 python/beeai_framework/adapters/watsonx/backend/chat.py create mode 100644 python/beeai_framework/agents/__init__.py create mode 100644 python/beeai_framework/agents/base.py create mode 100644 python/beeai_framework/agents/bee/__init__.py create mode 100644 python/beeai_framework/agents/bee/agent.py create mode 100644 python/beeai_framework/agents/errors.py create mode 100644 python/beeai_framework/agents/runners/base.py create mode 100644 python/beeai_framework/agents/runners/default/prompts.py create mode 100644 python/beeai_framework/agents/runners/default/runner.py create mode 100644 python/beeai_framework/agents/runners/granite/prompts.py create mode 100644 python/beeai_framework/agents/runners/granite/runner.py create mode 100644 python/beeai_framework/agents/types.py create mode 100644 python/beeai_framework/backend/__init__.py create mode 100644 python/beeai_framework/backend/chat.py create mode 100644 python/beeai_framework/backend/constants.py create mode 100644 python/beeai_framework/backend/errors.py create mode 100644 python/beeai_framework/backend/message.py create mode 100644 python/beeai_framework/backend/utils.py create mode 100644 python/beeai_framework/cancellation.py create mode 100644 python/beeai_framework/context.py create mode 100644 python/beeai_framework/emitter/__init__.py create mode 100644 python/beeai_framework/emitter/emitter.py create mode 100644 python/beeai_framework/emitter/errors.py create mode 100644 python/beeai_framework/emitter/types.py create mode 100644 python/beeai_framework/emitter/utils.py create mode 100644 python/beeai_framework/errors.py create mode 100644 python/beeai_framework/llms/__init__.py create mode 100644 python/beeai_framework/llms/base_output.py create mode 100644 python/beeai_framework/llms/llm.py create mode 100644 python/beeai_framework/llms/output.py create mode 100644 python/beeai_framework/memory/__init__.py create mode 100644 python/beeai_framework/memory/base_cache.py create mode 100644 python/beeai_framework/memory/base_memory.py create mode 100644 python/beeai_framework/memory/errors.py create mode 100644 python/beeai_framework/memory/file_cache.py create mode 100644 python/beeai_framework/memory/readonly_memory.py create mode 100644 python/beeai_framework/memory/serializable.py create mode 100644 python/beeai_framework/memory/serializer.py create mode 100644 
python/beeai_framework/memory/sliding_cache.py create mode 100644 python/beeai_framework/memory/sliding_memory.py create mode 100644 python/beeai_framework/memory/summarize_memory.py create mode 100644 python/beeai_framework/memory/task_map.py create mode 100644 python/beeai_framework/memory/token_memory.py create mode 100644 python/beeai_framework/memory/unconstrained_cache.py create mode 100644 python/beeai_framework/memory/unconstrained_memory.py create mode 100644 python/beeai_framework/parsers/line_prefix.py create mode 100644 python/beeai_framework/tools/__init__.py create mode 100644 python/beeai_framework/tools/errors.py create mode 100644 python/beeai_framework/tools/mcp_tools.py create mode 100644 python/beeai_framework/tools/search/__init__.py create mode 100644 python/beeai_framework/tools/search/base.py create mode 100644 python/beeai_framework/tools/search/duckduckgo.py create mode 100644 python/beeai_framework/tools/search/wikipedia.py create mode 100644 python/beeai_framework/tools/tool.py create mode 100644 python/beeai_framework/tools/weather/openmeteo.py create mode 100644 python/beeai_framework/utils/__init__.py create mode 100644 python/beeai_framework/utils/_types.py create mode 100644 python/beeai_framework/utils/config.py create mode 100644 python/beeai_framework/utils/counter.py create mode 100644 python/beeai_framework/utils/custom_logger.py create mode 100644 python/beeai_framework/utils/errors.py create mode 100644 python/beeai_framework/utils/events.py create mode 100644 python/beeai_framework/utils/models.py create mode 100644 python/beeai_framework/utils/regex.py create mode 100644 python/beeai_framework/utils/templates.py create mode 100644 python/beeai_framework/workflows/__init__.py create mode 100644 python/beeai_framework/workflows/agent.py create mode 100644 python/beeai_framework/workflows/errors.py create mode 100644 python/beeai_framework/workflows/workflow.py create mode 100644 python/cz_commitizen/__init__.py create mode 100644 python/cz_commitizen/monorepo_commits.py create mode 100644 python/docs/CODE_OF_CONDUCT.md create mode 100644 python/docs/CONTRIBUTING.md create mode 100644 python/docs/README.md create mode 100644 python/docs/SECURITY.md create mode 100644 python/docs/_sidebar.md create mode 100644 python/docs/agents.md create mode 100644 python/docs/assets/Bee_Dark.svg create mode 100644 python/docs/backend.md create mode 100644 python/docs/cache.md create mode 100644 python/docs/emitter.md create mode 100644 python/docs/errors.md create mode 100644 python/docs/examples.md create mode 100644 python/docs/index.html create mode 100644 python/docs/instrumentation.md create mode 100644 python/docs/integrations.md create mode 100644 python/docs/logger.md create mode 100644 python/docs/memory.md create mode 100644 python/docs/overview.md create mode 100644 python/docs/searxng-tool.md create mode 100644 python/docs/serialization.md create mode 100644 python/docs/sql-tool.md create mode 100644 python/docs/templates.md create mode 100644 python/docs/tools.md create mode 100644 python/docs/tutorials.md create mode 100644 python/docs/version.md create mode 100644 python/docs/workflows.md create mode 100644 python/examples/README.md create mode 100644 python/examples/agents/README.md create mode 100644 python/examples/agents/bee.py create mode 100644 python/examples/agents/bee_advanced.py create mode 100644 python/examples/agents/granite.py create mode 100644 python/examples/agents/memory.py create mode 100644 python/examples/agents/requirements.txt 
create mode 100644 python/examples/agents/simple.py create mode 100644 python/examples/backend/providers/ollama.py create mode 100644 python/examples/backend/providers/watsonx.py create mode 100644 python/examples/basic.py create mode 100644 python/examples/helpers/io.py create mode 100644 python/examples/llms.py create mode 100644 python/examples/memory/agentMemory.py create mode 100644 python/examples/memory/slidingMemory.py create mode 100644 python/examples/memory/summarizeMemory.py create mode 100644 python/examples/memory/tokenMemory.py create mode 100644 python/examples/memory/unconstrainedMemory.py create mode 100644 python/examples/notebooks/README.md create mode 100644 python/examples/notebooks/agents.ipynb create mode 100644 python/examples/notebooks/basics.ipynb create mode 100644 python/examples/notebooks/requirements.txt create mode 100644 python/examples/notebooks/searXNG.md create mode 100644 python/examples/notebooks/workflows.ipynb create mode 100644 python/examples/requirements.txt create mode 100644 python/examples/runners/basic_runner.py create mode 100644 python/examples/templates/agent_sys_prompt.py create mode 100644 python/examples/templates/basic_functions.py create mode 100644 python/examples/templates/basic_template.py create mode 100644 python/examples/tools/decorator.py create mode 100644 python/examples/tools/duckduckgo.py create mode 100644 python/examples/tools/openmeteo.py create mode 100644 python/examples/version.py create mode 100644 python/examples/workflows/advanced.py create mode 100644 python/examples/workflows/memory.py create mode 100644 python/examples/workflows/multi_agents.py create mode 100644 python/examples/workflows/simple.py create mode 100644 python/examples/workflows/web_agent.py create mode 100644 python/poetry.lock create mode 100644 python/pyproject.toml create mode 100755 python/scripts/copyright.sh create mode 100644 python/tests/__init__.py create mode 100644 python/tests/backend/test_chatmodel.py create mode 100644 python/tests/backend/test_message.py create mode 100644 python/tests/conftest.py create mode 100644 python/tests/errors_test.py create mode 100644 python/tests/runners/test_default_runner.py create mode 100644 python/tests/templates/test_templates.py create mode 100644 python/tests/tools/test_duckduckgo.py create mode 100644 python/tests/tools/test_mcp_tool.py create mode 100644 python/tests/tools/test_opemmeteo.py create mode 100644 python/tests/utils/test_custom_logger.py create mode 100644 python/tests/workflows/multi_agents.py create mode 100644 python/tests/workflows/test_workflow.py diff --git a/python/.embedmeignore b/python/.embedmeignore new file mode 100644 index 00000000..ca5368c3 --- /dev/null +++ b/python/.embedmeignore @@ -0,0 +1 @@ +docs/README.md diff --git a/python/.env.example b/python/.env.example new file mode 100644 index 00000000..b93a37b2 --- /dev/null +++ b/python/.env.example @@ -0,0 +1,19 @@ +######################## +### BeeAI configuration +######################## + +BEEAI_LOG_LEVEL=INFO + +######################## +### OpenAI specific configuration +######################## + +# OPENAI_API_KEY=your-openai-api-key + +######################## +### watsonx specific configuration +######################## + +# WATSONX_URL=your-watsonx-instance-base-url +# WATSONX_PROJECT_ID=your-watsonx-project-id +# WATSONX_APIKEY=your-watsonx-api-key diff --git a/python/.gitignore b/python/.gitignore new file mode 100644 index 00000000..04863b4c --- /dev/null +++ b/python/.gitignore @@ -0,0 +1,166 @@ +# MacOS 
+.DS_Store + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +.idea/ + +# Visual Studio Code +.vscode diff --git a/python/CONTRIBUTING.md b/python/CONTRIBUTING.md new file mode 100644 index 00000000..d77a9019 --- /dev/null +++ b/python/CONTRIBUTING.md @@ -0,0 +1,223 @@ +# Contributing + +BeeAI Python is an open-source project committed to bringing LLM agents to people of all backgrounds. 
This page describes how you can join the BeeAI community in this goal. + +## Before you start + +If you are new to contributing to BeeAI, we recommend you do the following before diving into the code: + +- Read the [Code of Conduct](./CODE_OF_CONDUCT.md). + +## Style and lint + +BeeAI Python uses the following tools to meet code quality standards and ensure a unified code style across the codebase: + +- [Black](https://black.readthedocs.io/) - code formatter +- [Ruff](https://beta.ruff.rs/docs/) - fast Python linter + +Simple [scripts for Poetry](dev_tools/scripts.py) are included to help you review your changes and commit them. + +## Issues and pull requests + +We use GitHub pull requests to accept contributions. + +While not required, opening a new issue about the bug you're fixing or the feature you're working on before you open a pull request is important for starting a discussion with the community about your work. The issue gives us a place to talk about the idea and how we can work together to implement it in the code. It also lets the community know what you're working on, and if you need help, you can reference the issue when discussing it with other community and team members. + +If you've written some code but need help finishing it, want initial feedback before completing it, or want to share and discuss it prior to finishing the implementation, you can open a draft pull request and prepend the title with the [WIP] tag (for Work In Progress). This indicates to reviewers that the code in the PR isn't in its final state and will change. It also means we will only merge the commit once it is finished. You or a reviewer can remove the [WIP] tag when the code is ready to be thoroughly reviewed for merging. + +## Choose an issue to work on + +BeeAI Python uses the following labels to help non-maintainers find issues best suited to their interests and experience level: + +- [good first issue](https://github.com/i-am-bee/beeai-framework/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) - these issues are typically the simplest available to work on, ideal for newcomers. They should already be fully scoped, with a straightforward approach outlined in the descriptions. +- [help wanted](https://github.com/i-am-bee/beeai-framework/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) - these issues are generally more complex than good first issues. They typically cover work that core maintainers don't currently have the capacity to implement and may require more investigation/discussion. These are great options for experienced contributors looking for something more challenging. + +## Setting up a local development environment + +### Prerequisites + +For development, there are some tools you will need prior to cloning the code. + +#### Python +We recommend using Python 3.11 or higher. First, ensure you have Python installed: + +```bash +python --version +``` + +#### Poetry + +[Poetry](https://python-poetry.org/) is a tool for Python packaging, dependency, and virtual environment management that is used to manage the development of this project. Verify that version two (V2) is installed on your machine. There are several ways to install it, including through your operating system's package manager; however, the easiest way is the official installer: + +```bash +curl -sSL https://install.python-poetry.org | python3 - +``` + +You can also use `pip` or `pipx` to install Poetry.
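+ +For instance, you can verify the installation before moving on (a quick sanity check; the exact output format may vary by Poetry release): + +```bash +# Confirm Poetry is on your PATH and reports a 2.x version +poetry --version +```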
+ +Once you have Poetry installed, you will also need to add the poetry shell plugin: + +```bash +poetry self add poetry-plugin-shell +``` + +> [!IMPORTANT] +> You must have poetry >= 2.0 installed + +### Clone and set up the code + +Follow these steps: + +```bash +# Clone the repository +git clone https://github.com/i-am-bee/beeai-framework.git + +# Ensure you have the pre-commit hooks installed +pre-commit install + +# Use Poetry to install the project dependencies and activate a virtual environment +poetry install +poetry shell + +# Copy .env.example to .env and fill in required values +cp .env.example .env +``` + +### Build the pip package + +#### Build the package: + +```bash +poetry build +``` + +#### Test the Build Locally (Recommended) + +Note: This should be done outside an existing virtual environment or Poetry shell. + +```bash +# Create a virtual environment +python -m venv test_env + +source test_env/bin/activate # On Windows: test_env\Scripts\activate + +# Install the built package +pip install dist/beeai-framework-0.1.0.tar.gz +``` + +#### Publish to TestPyPI + +```bash +# Configure Poetry: +poetry config repositories.testpypi https://test.pypi.org/legacy/ +# Publish +poetry publish -r testpypi +# Test the installation +pip install --index-url https://test.pypi.org/simple/ beeai-framework +``` + +#### Run Linters/Formatters +Ensure your changes meet code quality standards: + +- lint: run Black and Ruff with the following command: + +```bash +poetry run lint +``` + +#### Run Tests +Ensure your changes pass all tests: + +```bash +# Run unit tests +pytest tests/unit +# Run integration tests +pytest tests/integration +# Run E2E tests +pytest tests/e2e +``` + +#### Follow Conventional Commit Messages +We use [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) to structure our commit messages. Please use the following format: + +``` +<type>(<scope>): <subject> +``` + +- Type: feat, fix, chore, docs, style, refactor, perf, test, etc. +- Scope: The area of the codebase your changes affect (optional). The allowed values are: adapters, agents, llms, tools, cache, emitter, internals, logger, memory, serializer, infra, deps, instrumentation +- Subject: A short description of the changes (required) + +_Example:_ + +``` +feat(llm): add streaming support for watsonx adapter + +Ref: #15 +``` + +#### Commit + +For convenience, you can use the following command to generate a commit that is signed off (`-s`): + +```bash +poetry run commit "<type>(<scope>): <subject>" +``` + +By following these steps, you'll be all set to contribute to our project! If you encounter any issues during the setup process, please feel free to open an issue. + +## Updating examples and embedding +Currently, [embedme](https://github.com/zakhenry/embedme) is used to embed code examples directly in documentation. Supported file types can be found [here](https://github.com/zakhenry/embedme?tab=readme-ov-file#multi-language). + +Once an example is edited, or a new one is created and referenced, running the following command will update the documentation: + +```bash +poetry run embedme +``` + +## Legal + +The following sections detail important legal information that should be viewed prior to contribution. + +### License and Copyright + +Distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0). + +SPDX-License-Identifier: [Apache-2.0](https://spdx.org/licenses/Apache-2.0) + +If you would like to see the full LICENSE, click [here](/LICENSE).
+ +### Developer Certificate of Origin (DCO) + +We have tried to make contributing as easy as possible, and this applies to how we handle the legal aspects of contribution. We use the same approach - the [Developer's Certificate of Origin 1.1 (DCO)](https://developercertificate.org/) - that the Linux® Kernel [community](https://docs.kernel.org/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin) uses to manage code contributions. + +We ask that, when submitting a patch for review, the developer include a sign-off statement in the commit message. If you set your `user.name` and `user.email` in your `git config` file, you can sign your commit automatically by using the following command: + +```bash +git commit -s +``` + +If a commit has already been created but the sign-off was missed, this can be remedied: + +```bash +git commit --amend -s +``` + +The following example includes a `Signed-off-by:` line, which indicates that the submitter has accepted the DCO: + +```txt +Signed-off-by: John Doe <john.doe@example.com> +``` + +We automatically verify that all commit messages contain a `Signed-off-by:` line with your email address. + +#### Useful tools for doing DCO signoffs + +While the GitHub web UI natively supports this now, there are a number of tools that make it easier for developers to manage DCO signoffs if not using the web interface. + +- DCO command line tool, which lets you do a single signoff for an entire repo ( ) +- GitHub UI integrations for adding the signoff automatically ( ) +- Chrome - +- Firefox - diff --git a/python/LICENSE b/python/LICENSE new file mode 100644 index 00000000..b09cd785 --- /dev/null +++ b/python/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/python/README.md b/python/README.md new file mode 100644 index 00000000..bedd1a9b --- /dev/null +++ b/python/README.md @@ -0,0 +1,142 @@ +> [!WARNING] +> PRE-Alpha! Please reach out if you want to get involved in the discussions. All feedback is welcomed + +

+<p align="center">
+  <img alt="BeeAI Framework logo" src="docs/assets/Bee_Dark.svg">
+</p>
+
+<h4 align="center">BeeAI Agent Framework for Python</h4>
+
+<p align="center">Project Status: Alpha</p>
+
+<p align="center">Python implementation of the BeeAI Agent Framework for building, deploying, and serving powerful agentic workflows at scale.</p>

+ +The BeeAI Agent Framework for Python makes it easy to build scalable agent-based workflows with your model of choice. This framework is designed to perform robustly with [IBM Granite](https://www.ibm.com/granite?adoper=255252_0_LS1) and [Llama 3.x](https://ai.meta.com/blog/meta-llama-3-1/) models. Varying levels of support are currently available for [other LLMs using LiteLLM](https://docs.litellm.ai/docs/providers). We're actively working on optimizing its performance with these popular LLMs. + +Our goal is to empower Python developers to adopt the latest open-source and proprietary models with minimal changes to their current agent implementation. + +## Key Features + +- 🤖 **AI agents**: Use our powerful BeeAI agent refined for Llama 3.x and Granite 3.x, or build your own. +- 🛠️ **Tools**: Use our built-in tools or create your own in Python. +- 💾 **Memory**: Multiple strategies to optimize token spend. +- ... more on our Roadmap + +## Getting Started + +### Installation + +```bash +pip install beeai-framework +``` + +### Quick Example + +```py +from beeai_framework import BeeAgent, LLM + +agent = BeeAgent(llm=LLM()) + +agent.run("What is the capital of Massachusetts") +``` + +> [!NOTE] +> To run this example, ensure you have [ollama](https://ollama.com) installed with the [llama3.1](https://ollama.com/library/llama3.1) model downloaded. + +To run other examples, use `python examples/[example_name].py`, for example: + +```bash +python examples/basic.py +``` + +## Local Development + +Please check our [contributing guide](./CONTRIBUTING.md). + +### Prerequisites + +For development, there are some tools you will need prior to cloning the code. + +#### Poetry + +[Poetry](https://python-poetry.org/) is a tool for Python packaging, dependency, and virtual environment management that is used to manage the development of this project. Verify that version 2 is installed on your machine. There are several ways to install it, including through your operating system's package manager; however, the easiest way is the official installer: + +```bash +curl -sSL https://install.python-poetry.org | python3 - +``` + +You can also use `pip` or `pipx` to install Poetry. + +Once you have Poetry installed, you will also need to add the poetry shell plugin: + +```bash +poetry self add poetry-plugin-shell +``` + +> [!IMPORTANT] +> You must have poetry >= 2.0 installed + +### Clone and set up the code + +Follow these steps: + +```bash +# Clone the repository +git clone https://github.com/i-am-bee/beeai-framework + +cd python + +# Use Poetry to install the project dependencies and activate a virtual environment +poetry install +poetry shell + +# Copy .env.example to .env and fill in required values +cp .env.example .env +``` + +### Build the pip package + +#### Build the package: + +```bash +poetry build +``` + +## Modules + +The package provides several modules: + +| Module | Description | +| -------- | ----------------------------------------------------- | +| `agents` | Base classes defining the common interface for agents | +| `llms` | Base classes for text inference (standard or chat) | +| `tools` | Tools that an agent can use | + +## Roadmap + +- 👩‍💻 **Code interpreter**: Run code safely in a sandbox container. +- ⏸️ **Serialization**: Handle complex agentic workflows and easily pause/resume them without losing state. +- 🔍 **Instrumentation**: Full visibility of your agent's inner workings.
+- ๐ŸŽ›๏ธ **Production-level** control with caching and error handling. +- ๐Ÿ” **API**: OpenAI-compatible Assistants API integration. +- BeeAI agent performance optimization with additional models +- Examples, tutorials, and comprehensive documentation +- Improvements to building custom agents +- Multi-agent orchestration +- Feature parity with TypeScript version + +## Contributing + +The BeeAI Agent Framework for Python is an open-source project and we โค๏ธ contributions. Please check our [contribution guidelines](./CONTRIBUTING.md) before getting started. + +### Reporting Issues + +We use [GitHub Issues](https://github.com/i-am-bee/beeai-framework/issues) to track public bugs. Please check existing issues before filing new ones. + +### Code of Conduct + +This project adheres to our [Code of Conduct](./CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. + +## Legal Notice + +Initial content in these repositories including code has been provided by IBM under the associated open source software license and IBM is under no obligation to provide enhancements, updates, or support. IBM developers produced this code as an open source project (not as an IBM product), and IBM makes no assertions as to the level of quality nor security, and will not be maintaining this code going forward. diff --git a/python/SECURITY.md b/python/SECURITY.md new file mode 100644 index 00000000..656f559d --- /dev/null +++ b/python/SECURITY.md @@ -0,0 +1,15 @@ +# Security Policy + +## Reporting a Vulnerability + +To report vulnerabilities, you can privately report a potential security issue +via the GitHub security vulnerabilities feature. This can be done here: + +https://github.com/i-am-bee/beeai-framework/security/advisories + +Please do **not** open a public issue about a potential security vulnerability. 
+ +You can find more details on the security vulnerability feature in the GitHub +documentation here: + +https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability diff --git a/python/beeai_framework/__init__.py b/python/beeai_framework/__init__.py new file mode 100644 index 00000000..a1780274 --- /dev/null +++ b/python/beeai_framework/__init__.py @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: Apache-2.0 +from beeai_framework.agents import BaseAgent +from beeai_framework.agents.bee.agent import BeeAgent +from beeai_framework.backend import ( + AssistantMessage, + CustomMessage, + Message, + Role, + SystemMessage, + ToolMessage, + UserMessage, +) +from beeai_framework.llms import LLM, AgentInput, BaseLLM +from beeai_framework.memory import BaseMemory, ReadOnlyMemory, TokenMemory, UnconstrainedMemory +from beeai_framework.memory.serializable import Serializable +from beeai_framework.tools import Tool, tool +from beeai_framework.tools.weather.openmeteo import OpenMeteoTool +from beeai_framework.utils.templates import Prompt + +__all__ = [ + "LLM", + "AgentInput", + "AssistantMessage", + "BaseAgent", + "BaseLLM", + "BaseMemory", + "BeeAgent", + "CustomMessage", + "Message", + "OpenMeteoTool", + "Prompt", + "ReadOnlyMemory", + "Role", + "Serializable", + "SystemMessage", + "TokenMemory", + "Tool", + "ToolMessage", + "UnconstrainedMemory", + "UserMessage", + "tool", +] diff --git a/python/beeai_framework/adapters/__init__.py b/python/beeai_framework/adapters/__init__.py new file mode 100644 index 00000000..98813136 --- /dev/null +++ b/python/beeai_framework/adapters/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: Apache-2.0 diff --git a/python/beeai_framework/adapters/litellm/chat.py b/python/beeai_framework/adapters/litellm/chat.py new file mode 100644 index 00000000..b7e4ae89 --- /dev/null +++ b/python/beeai_framework/adapters/litellm/chat.py @@ -0,0 +1,158 @@ +# SPDX-License-Identifier: Apache-2.0 + +import json +from collections.abc import AsyncGenerator +from typing import Any + +import litellm +from litellm import ( + ModelResponse, + ModelResponseStream, + acompletion, + get_supported_openai_params, +) +from pydantic import BaseModel, ConfigDict + +from beeai_framework.backend.chat import ( + ChatModel, + ChatModelInput, + ChatModelOutput, + ChatModelStructureInput, + ChatModelStructureOutput, +) +from beeai_framework.backend.errors import ChatModelError +from beeai_framework.backend.message import AssistantMessage, Message, Role, ToolMessage +from beeai_framework.backend.utils import parse_broken_json +from beeai_framework.context import RunContext +from beeai_framework.tools.tool import Tool +from beeai_framework.utils.custom_logger import BeeLogger + +logger = BeeLogger(__name__) + + +class LiteLLMParameters(BaseModel): + model: str + messages: list[dict[str, Any]] + tools: list[dict[str, Any]] | None = None + response_format: dict[str, Any] | type[BaseModel] | None = None + + model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True) + + +class LiteLLMChatModel(ChatModel): + @property + def model_id(self) -> str: + return self._model_id + + @property + def provider_id(self) -> str: + return self._provider_id + + def __init__(self, model_id: str | None = None, **settings: Any) -> None: + llm_provider = "ollama_chat" if self.provider_id == "ollama" else self.provider_id + self.supported_params = get_supported_openai_params(model=self.model_id, custom_llm_provider=llm_provider) + # drop any 
unsupported parameters that were passed in +litellm.drop_params = True + super().__init__() + + async def _create( + self, + input: ChatModelInput, + run: RunContext, + ) -> ChatModelOutput: + litellm_input = self._transform_input(input) + response = await acompletion(**litellm_input.model_dump()) + response_message = response.get("choices", [{}])[0].get("message", {}) + response_content = response_message.get("content", "") + tool_calls = response_message.tool_calls + + if tool_calls: + litellm_input.messages.append({"role": Role.ASSISTANT, "content": response_content}) + for tool_call in tool_calls: + function_name = tool_call.function.name + function_to_call: Tool = next(filter(lambda t: t.name == function_name, input.tools)) + + function_args = json.loads(tool_call.function.arguments) + function_response = function_to_call.run(input=function_args) + litellm_input.messages.append({"role": Role.TOOL, "content": function_response}) + + response = await acompletion(**litellm_input.model_dump()) + + response_output = self._transform_output(response) + logger.trace(f"Inference response output:\n{response_output}") + return response_output + + async def _create_stream(self, input: ChatModelInput, _: RunContext) -> AsyncGenerator[ChatModelOutput]: + litellm_input = self._transform_input(input) + parameters = litellm_input.model_dump() + parameters["stream"] = True + response = await acompletion(**parameters) + + # TODO: handle tool calling for streaming + async for chunk in response: + response_output = self._transform_output(chunk) + if not response_output: + continue + yield response_output + + async def _create_structure(self, input: ChatModelStructureInput, run: RunContext) -> ChatModelStructureOutput: + # supported_params may be None for models unknown to LiteLLM + if "response_format" not in (self.supported_params or []): + logger.warning(f"{self.provider_id} model {self.model_id} does not support structured data.") + return await super()._create_structure(input, run) + else: + response = await self._create( + ChatModelInput(messages=input.messages, response_format=input.schema, abort_signal=input.abort_signal), + run, + ) + + logger.trace(f"Structured response received:\n{response}") + + text_response = response.get_text_content() + result = parse_broken_json(text_response) + # TODO: validate result matches expected schema + return ChatModelStructureOutput(object=result) + + def _get_model_name(self) -> str: + return f"{'ollama_chat' if self.provider_id == 'ollama' else self.provider_id}/{self.model_id}" + + def _transform_input(self, input: ChatModelInput) -> LiteLLMParameters: + messages_list = [message.to_plain() for message in input.messages] + + if input.tools: + prepared_tools_list = [{"type": "function", "function": tool.prompt_data()} for tool in input.tools] + else: + prepared_tools_list = None + + model = self._get_model_name() + + return LiteLLMParameters( + model=model, + messages=messages_list, + tools=prepared_tools_list, + response_format=input.response_format, + **self.settings, + ) + + def _transform_output(self, chunk: ModelResponse | ModelResponseStream) -> ChatModelOutput | None: + # Returns None for stream chunks that carry only a finish_reason + choice = chunk.get("choices", [{}])[0] + finish_reason = choice.get("finish_reason") + message: Message | None = None + usage = choice.get("usage") + + if isinstance(chunk, ModelResponseStream): + if finish_reason: + return None + content = choice.get("delta", {}).get("content") + if choice.get("tool_calls"): + message = ToolMessage(content) + elif choice.get("delta"): + message = AssistantMessage(content) + else: + # TODO: handle other possible types + raise
ChatModelError(f"Unhandled event: {choice}") + else: + response_message = choice.get("message") + content = response_message.get("content") + message = AssistantMessage(content) + + return ChatModelOutput(messages=[message], finish_reason=finish_reason, usage=usage) diff --git a/python/beeai_framework/adapters/ollama/__init__.py b/python/beeai_framework/adapters/ollama/__init__.py new file mode 100644 index 00000000..98813136 --- /dev/null +++ b/python/beeai_framework/adapters/ollama/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: Apache-2.0 diff --git a/python/beeai_framework/adapters/ollama/backend/__init__.py b/python/beeai_framework/adapters/ollama/backend/__init__.py new file mode 100644 index 00000000..98813136 --- /dev/null +++ b/python/beeai_framework/adapters/ollama/backend/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: Apache-2.0 diff --git a/python/beeai_framework/adapters/ollama/backend/chat.py b/python/beeai_framework/adapters/ollama/backend/chat.py new file mode 100644 index 00000000..a4967cd3 --- /dev/null +++ b/python/beeai_framework/adapters/ollama/backend/chat.py @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: Apache-2.0 + +import os +from typing import Any + +from beeai_framework.adapters.litellm.chat import LiteLLMChatModel +from beeai_framework.backend.constants import ProviderName +from beeai_framework.utils.custom_logger import BeeLogger + +logger = BeeLogger(__name__) + + +class OllamaChatModel(LiteLLMChatModel): + provider_id: ProviderName = "ollama" + + def __init__(self, model_id: str | None = None, **settings: Any) -> None: + self._model_id = model_id if model_id else os.getenv("OLLAMA_CHAT_MODEL", "llama3.1:8b") + self.settings = {"base_url": "http://localhost:11434"} | settings + super().__init__() diff --git a/python/beeai_framework/adapters/watsonx/__init__.py b/python/beeai_framework/adapters/watsonx/__init__.py new file mode 100644 index 00000000..98813136 --- /dev/null +++ b/python/beeai_framework/adapters/watsonx/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: Apache-2.0 diff --git a/python/beeai_framework/adapters/watsonx/backend/__init__.py b/python/beeai_framework/adapters/watsonx/backend/__init__.py new file mode 100644 index 00000000..98813136 --- /dev/null +++ b/python/beeai_framework/adapters/watsonx/backend/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: Apache-2.0 diff --git a/python/beeai_framework/adapters/watsonx/backend/chat.py b/python/beeai_framework/adapters/watsonx/backend/chat.py new file mode 100644 index 00000000..cd635795 --- /dev/null +++ b/python/beeai_framework/adapters/watsonx/backend/chat.py @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: Apache-2.0 + +import os +from typing import Any + +from beeai_framework.adapters.litellm.chat import LiteLLMChatModel +from beeai_framework.backend.constants import ProviderName +from beeai_framework.utils.custom_logger import BeeLogger + +logger = BeeLogger(__name__) + + +class WatsonxChatModel(LiteLLMChatModel): + provider_id: ProviderName = "watsonx" + + def __init__(self, model_id: str | None = None, **settings: Any) -> None: + self._model_id = model_id if model_id else os.getenv("WATSONX_CHAT_MODEL", "ibm/granite-3-8b-instruct") + self.settings = settings + super().__init__() diff --git a/python/beeai_framework/agents/__init__.py b/python/beeai_framework/agents/__init__.py new file mode 100644 index 00000000..94426065 --- /dev/null +++ b/python/beeai_framework/agents/__init__.py @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.agents.base 
import BaseAgent +from beeai_framework.agents.errors import AgentError + +__all__ = ["AgentError", "BaseAgent"] diff --git a/python/beeai_framework/agents/base.py b/python/beeai_framework/agents/base.py new file mode 100644 index 00000000..68dff26d --- /dev/null +++ b/python/beeai_framework/agents/base.py @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: Apache-2.0 + +from abc import ABC, abstractmethod + +from beeai_framework.agents.types import AgentMeta, BeeRunInput, BeeRunOptions, BeeRunOutput +from beeai_framework.context import Run, RunContext, RunContextInput, RunInstance +from beeai_framework.emitter import Emitter +from beeai_framework.memory import BaseMemory +from beeai_framework.utils.models import ModelLike, to_model, to_model_optional + + +class BaseAgent(ABC): + is_running: bool = False + emitter: Emitter = None + + def run(self, run_input: ModelLike[BeeRunInput], options: ModelLike[BeeRunOptions] | None = None) -> Run: + run_input = to_model(BeeRunInput, run_input) + options = to_model_optional(BeeRunOptions, options) + + if self.is_running: + raise RuntimeError("Agent is already running!") + + try: + return RunContext.enter( + RunInstance(emitter=self.emitter), + RunContextInput(signal=options.signal if options else None, params=(run_input, options)), + lambda context: self._run(run_input, options, context), + ) + except Exception as e: + if isinstance(e, RuntimeError): + raise e + else: + raise RuntimeError("Error has occurred!") from e + finally: + self.is_running = False + + @abstractmethod + async def _run(self, run_input: BeeRunInput, options: BeeRunOptions | None, context: RunContext) -> BeeRunOutput: + pass + + def destroy(self) -> None: + self.emitter.destroy() + + @property + @abstractmethod + def memory(self) -> BaseMemory: + pass + + @memory.setter + @abstractmethod + def memory(self, memory: BaseMemory) -> None: + pass + + @property + def meta(self) -> AgentMeta: + return AgentMeta( + name=self.__class__.__name__, + description="", + tools=[], + ) diff --git a/python/beeai_framework/agents/bee/__init__.py b/python/beeai_framework/agents/bee/__init__.py new file mode 100644 index 00000000..0c367628 --- /dev/null +++ b/python/beeai_framework/agents/bee/__init__.py @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.agents.bee.agent import BeeAgent + +__all__ = ["BeeAgent"] diff --git a/python/beeai_framework/agents/bee/agent.py b/python/beeai_framework/agents/bee/agent.py new file mode 100644 index 00000000..707cdeed --- /dev/null +++ b/python/beeai_framework/agents/bee/agent.py @@ -0,0 +1,165 @@ +# SPDX-License-Identifier: Apache-2.0 + +from collections.abc import Callable +from datetime import UTC, datetime + +from beeai_framework.agents.base import BaseAgent +from beeai_framework.agents.runners.base import ( + BaseRunner, + BeeRunnerToolInput, + BeeRunnerToolResult, + RunnerIteration, +) +from beeai_framework.agents.runners.default.runner import DefaultRunner +from beeai_framework.agents.runners.granite.runner import GraniteRunner +from beeai_framework.agents.types import ( + AgentMeta, + BeeAgentExecutionConfig, + BeeInput, + BeeRunInput, + BeeRunOptions, + BeeRunOutput, +) +from beeai_framework.backend import Message +from beeai_framework.backend.message import AssistantMessage, MessageMeta, UserMessage +from beeai_framework.context import RunContext +from beeai_framework.emitter import Emitter, EmitterInput +from beeai_framework.memory import BaseMemory + + +class BeeAgent(BaseAgent): + runner: Callable[..., BaseRunner] + + def 
__init__(self, bee_input: BeeInput) -> None: + self.input = bee_input + if "granite" in self.input.llm.model_id: + self.runner = GraniteRunner + else: + self.runner = DefaultRunner + self.emitter = Emitter.root().child( + EmitterInput( + namespace=["agent", "bee"], + creator=self, + ) + ) + + @property + def memory(self) -> BaseMemory: + return self.input.memory + + @memory.setter + def memory(self, memory: BaseMemory) -> None: + self.input.memory = memory + + @property + def meta(self) -> AgentMeta: + tools = self.input.tools[:] + + if self.input.meta: + return AgentMeta( + name=self.input.meta.name, + description=self.input.meta.description, + extra_description=self.input.meta.extra_description, + tools=tools, + ) + + extra_description = ["Tools that I can use to accomplish the given task."] + for tool in tools: + extra_description.append(f"Tool '{tool.name}': {tool.description}.") + + return AgentMeta( + name="BeeAI", + tools=tools, + description="The BeeAI framework demonstrates its ability to auto-correct and adapt in real-time, improving" + " the overall reliability and resilience of the system.", + extra_description="\n".join(extra_description) if len(tools) > 0 else None, + ) + + async def _run(self, run_input: BeeRunInput, options: BeeRunOptions | None, context: RunContext) -> BeeRunOutput: + runner = self.runner( + self.input, + ( + options + if options + else BeeRunOptions( + execution=self.input.execution + or BeeAgentExecutionConfig( + max_retries_per_step=3, + total_max_retries=20, + max_iterations=10, + ), + signal=None, + ) + ), + context, + ) + await runner.init(run_input) + + final_message: Message | None = None + while not final_message: + iteration: RunnerIteration = await runner.create_iteration() + + if iteration.state.tool_name and iteration.state.tool_input: + tool_result: BeeRunnerToolResult = await runner.tool( + input=BeeRunnerToolInput( + state=iteration.state, + emitter=iteration.emitter, + meta=iteration.meta, + signal=iteration.signal, + ) + ) + await runner.memory.add( + AssistantMessage( + content=runner.templates.assistant.render( + { + "thought": iteration.state.thought, + "tool_name": iteration.state.tool_name, + "tool_input": iteration.state.tool_input, + "tool_output": tool_result.output.to_string(), + "final_answer": iteration.state.final_answer, + } + ), + meta=MessageMeta({"success": tool_result.success}), + ) + ) + iteration.state.tool_output = tool_result.output.get_text_content() + + for key in ["partialUpdate", "update"]: + await iteration.emitter.emit( + key, + { + "data": iteration.state, + "update": { + "key": "tool_output", + "value": tool_result.output, + "parsedValue": tool_result.output.to_string(), + }, + "meta": {"success": tool_result.success}, # TODO deleted meta + "memory": runner.memory, + }, + ) + + if iteration.state.final_answer: + final_message = AssistantMessage( + content=iteration.state.final_answer, meta=MessageMeta({"createdAt": datetime.now(tz=UTC)}) + ) + await runner.memory.add(final_message) + await iteration.emitter.emit( + "success", + { + "data": final_message, + "iterations": runner.iterations, + "memory": runner.memory, + "meta": iteration.meta, + }, + ) + + if run_input.prompt is not None: + await self.input.memory.add( + UserMessage(content=run_input.prompt, meta=MessageMeta({"createdAt": context.created_at})) + ) + + await self.input.memory.add(final_message) + + return BeeRunOutput(result=final_message, iterations=runner.iterations, memory=runner.memory)
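+ + # Usage sketch (illustrative comment, not executed): a minimal wiring of BeeAgent. + # It assumes a locally running Ollama server behind the OllamaChatModel adapter and + # an unconstrained memory; the model, the memory class, and the empty tool list are + # assumptions for the sketch, not defaults this class prescribes. + # + # from beeai_framework.adapters.ollama.backend.chat import OllamaChatModel + # from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory + # + # async def example() -> None: + #     agent = BeeAgent(BeeInput(llm=OllamaChatModel(), tools=[], memory=UnconstrainedMemory())) + #     output = await agent.run({"prompt": "What is the capital of France?"}) + #     print(output.result.text)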
diff --git a/python/beeai_framework/agents/errors.py b/python/beeai_framework/agents/errors.py new file mode 100644 index 00000000..690f341b --- /dev/null +++ b/python/beeai_framework/agents/errors.py @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.errors import FrameworkError + + +class AgentError(FrameworkError): + """Raised for errors caused by agents.""" + + def __init__(self, message: str = "Agent error", *, cause: Exception | None = None) -> None: + super().__init__(message, is_fatal=True, is_retryable=False, cause=cause) diff --git a/python/beeai_framework/agents/runners/base.py b/python/beeai_framework/agents/runners/base.py new file mode 100644 index 00000000..b8d6ca99 --- /dev/null +++ b/python/beeai_framework/agents/runners/base.py @@ -0,0 +1,130 @@ +# SPDX-License-Identifier: Apache-2.0 + +from abc import ABC, abstractmethod +from dataclasses import dataclass + +from beeai_framework.agents.types import ( + BeeAgentRunIteration, + BeeAgentTemplates, + BeeInput, + BeeIterationResult, + BeeMeta, + BeeRunInput, + BeeRunOptions, +) +from beeai_framework.cancellation import AbortSignal +from beeai_framework.context import RunContext +from beeai_framework.emitter.emitter import Emitter +from beeai_framework.emitter.types import EmitterInput +from beeai_framework.memory.base_memory import BaseMemory +from beeai_framework.tools import ToolOutput +from beeai_framework.utils.counter import RetryCounter + + +@dataclass +class BeeRunnerLLMInput: + meta: BeeMeta + signal: AbortSignal + emitter: Emitter + + +@dataclass +class RunnerIteration: + emitter: Emitter + state: BeeIterationResult + meta: BeeMeta + signal: AbortSignal + + +@dataclass +class BeeRunnerToolResult: + output: ToolOutput + success: bool + + +@dataclass +class BeeRunnerToolInput: + state: BeeIterationResult # TODO BeeIterationToolResult + meta: BeeMeta + signal: AbortSignal + emitter: Emitter + + +class BaseRunner(ABC): + def __init__(self, input: BeeInput, options: BeeRunOptions, run: RunContext) -> None: + self._input = input + self._options = options + self._failed_attempts_counter: RetryCounter = RetryCounter( + error_type=Exception, # TODO AgentError + max_retries=( + options.execution.total_max_retries if options.execution and options.execution.total_max_retries else 0 + ), + ) + + self._memory: BaseMemory | None = None + + self._iterations: list[BeeAgentRunIteration] = [] + self._run = run + + @property + def iterations(self) -> list[BeeAgentRunIteration]: + return self._iterations + + @property + def memory(self) -> BaseMemory: + if self._memory is not None: + return self._memory + raise Exception("Memory has not been initialized.") + + async def create_iteration(self) -> RunnerIteration: + meta: BeeMeta = BeeMeta(iteration=len(self._iterations) + 1) + max_iterations = ( + self._options.execution.max_iterations + if self._options.execution and self._options.execution.max_iterations + else 0 + ) + + if meta.iteration > max_iterations: + # TODO: Raise Agent Error with metadata + # https://github.com/i-am-bee/beeai-framework/blob/aa4d5e6091ed3bab8096492707ceb03d3b03863b/src/agents/bee/runners/base.ts#L70 + raise Exception(f"Agent was not able to resolve the task in {max_iterations} iterations.") + + emitter = 
self._run.emitter.child(emitter_input=EmitterInput(group_id=f"iteration-{meta.iteration}")) + iteration: BeeAgentRunIteration = await self.llm( + BeeRunnerLLMInput(emitter=emitter, signal=self._run.signal, meta=meta) + ) + self._iterations.append(iteration) + + return RunnerIteration(emitter=emitter, state=iteration.state, meta=meta, signal=self._run.signal) + + async def init(self, input: BeeRunInput) -> None: + self._memory = await self.init_memory(input) + + @abstractmethod + async def llm(self, input: BeeRunnerLLMInput) -> BeeAgentRunIteration: + pass + + @abstractmethod + async def tool(self, input: BeeRunnerToolInput) -> BeeRunnerToolResult: + pass + + @abstractmethod + def default_templates(self) -> BeeAgentTemplates: + pass + + @abstractmethod + async def init_memory(self, input: BeeRunInput) -> BaseMemory: + pass + + @property + def templates(self) -> BeeAgentTemplates: + # TODO: overrides + return self.default_templates() + + # TODO: Serialization diff --git a/python/beeai_framework/agents/runners/default/prompts.py b/python/beeai_framework/agents/runners/default/prompts.py new file mode 100644 index 00000000..f6f54e08 --- /dev/null +++ b/python/beeai_framework/agents/runners/default/prompts.py @@ -0,0 +1,138 @@ +from pydantic import BaseModel + +from beeai_framework.utils.templates import PromptTemplate + + +class UserPromptTemplateInput(BaseModel): + input: str + + +class AssistantPromptTemplateInput(BaseModel): + thought: str | None = None + tool_name: str | None = None + tool_input: str | None = None + tool_output: str | None = None + final_answer: str | None = None + + +class ToolDefinition(BaseModel): + name: str + description: str + input_schema: str + + +class SystemPromptTemplateInput(BaseModel): + tools: list[ToolDefinition] | None = [] + tools_length: int | None = 0 + instructions: str | None = None + + +class ToolNotFoundErrorTemplateInput(BaseModel): + tools: list[ToolDefinition] | None = [] + + +class ToolInputErrorTemplateInput(BaseModel): + reason: str + + +UserPromptTemplate = PromptTemplate(schema=UserPromptTemplateInput, template="Message: {{input}}") + +SystemPromptTemplate = PromptTemplate( + schema=SystemPromptTemplateInput, + template="""# Available functions +{{#tools_length}} +You can only use the following functions. Always use all required parameters. + +{{#tools}} +Function Name: {{name}} +Description: {{description}} +Input Schema: {{&input_schema}} + +{{/tools}} +{{/tools_length}} +{{^tools_length}} +No functions are available. + +{{/tools_length}} +# Communication structure +You communicate only in instruction lines. The format is: "Instruction: expected output". You must only use these instruction lines and must not enter empty lines or anything else between instruction lines. +{{#tools_length}} +You must skip the instruction lines Function Name, Function Input and Function Output if no function calling is required. +{{/tools_length}} + +Message: User's message. You never use this instruction line. +{{^tools_length}} +Thought: A single-line plan of how to answer the user's message. It must be immediately followed by Final Answer. 
+{{/tools_length}} +{{#tools_length}} +Thought: A single-line step-by-step plan of how to answer the user's message. You can use the available functions defined above. This instruction line must be immediately followed by Function Name if one of the available functions defined above needs to be called, or by Final Answer. Do not provide the answer here. +Function Name: Name of the function. This instruction line must be immediately followed by Function Input. +Function Input: Function parameters in JSON format adhering to the function's input specification, i.e. {"arg1":"value1", "arg2":"value2"}. Empty object is a valid parameter. +Function Output: Output of the function in JSON format. +Thought: Continue your thinking process. +{{/tools_length}} +Final Answer: Answer the user or ask for more information or clarification. It must always be preceded by Thought. + +## Examples +Message: Can you translate "How are you" into French? +Thought: The user wants to translate a text into French. I can do that. +Final Answer: Comment vas-tu? + +# Instructions +User can only see the Final Answer, so all answers must be provided there. +{{^tools_length}} +You must always use the communication structure and instructions defined above. Do not forget that Thought must be a single line immediately followed by Final Answer. +{{/tools_length}} +{{#tools_length}} +You must always use the communication structure and instructions defined above. Do not forget that Thought must be a single line immediately followed by either Function Name or Final Answer. +Functions must be used to retrieve factual or historical information to answer the message. +{{/tools_length}} +If the user suggests using a function that is not available, answer that the function is not available. You can suggest alternatives if appropriate. +When the message is unclear or you need more information from the user, ask in Final Answer. + +# Your capabilities +Prefer to use these capabilities over functions. +- You understand these languages: English, Spanish, French. +- You can translate and summarize, even long documents. + +# Notes +- If you don't know the answer, say that you don't know. +- The current time and date in ISO format can be found in the last message. +- When answering the user, use friendly formats for time and date. +- Use markdown syntax for formatting code snippets, links, JSON, tables, images, files. +- Sometimes, things don't go as planned. Functions may not provide useful information on the first few tries. You should always try a few different approaches before declaring the problem unsolvable. +- When the function doesn't give you what you were asking for, you must either use another function or a different function input. + - When using search engines, you try different formulations of the query, possibly even in a different language. +- You cannot do complex calculations, computations, or data manipulations without using functions. +{{#instructions}} + +# Role +{{instructions}} +{{/instructions}} +""", # noqa: E501 +) + +ToolNotFoundErrorTemplate = PromptTemplate( + schema=ToolNotFoundErrorTemplateInput, + template="""Function does not exist! 
+{{#tools.length}} +Use one of the following functions: {{#trim}}{{#tools}}{{name}},{{/tools}}{{/trim}} +{{/tools.length}}""", +) + +ToolInputErrorTemplate = PromptTemplate( + schema=ToolInputErrorTemplateInput, + template="""{{reason}} + +HINT: If you're convinced that the input was correct but the function cannot process it then use a different function or say I don't know.""", # noqa: E501 +) + +AssistantPromptTemplate = PromptTemplate( + schema=AssistantPromptTemplateInput, + template="""{{#thought}}Thought: {{&.}}\n{{/thought}}{{#tool_name}}Function Name: {{&.}}\n{{/tool_name}}{{#tool_input}}Function Input: {{&.}}\n{{/tool_input}}{{#tool_output}}Function Output: {{&.}}\n{{/tool_output}}{{#final_answer}}Final Answer: {{&.}}{{/final_answer}}""", # noqa: E501 +) diff --git a/python/beeai_framework/agents/runners/default/runner.py b/python/beeai_framework/agents/runners/default/runner.py new file mode 100644 index 00000000..c576848a --- /dev/null +++ b/python/beeai_framework/agents/runners/default/runner.py @@ -0,0 +1,171 @@ +import json +from collections.abc import Callable +from typing import Any + +from beeai_framework.agents.runners.base import ( + BaseRunner, + BeeRunnerLLMInput, + BeeRunnerToolInput, + BeeRunnerToolResult, +) +from beeai_framework.agents.runners.default.prompts import ( + AssistantPromptTemplate, + SystemPromptTemplate, + SystemPromptTemplateInput, + ToolDefinition, + ToolInputErrorTemplate, + ToolNotFoundErrorTemplate, + UserPromptTemplate, +) +from beeai_framework.agents.types import ( + BeeAgentRunIteration, + BeeAgentTemplates, + BeeIterationResult, + BeeRunInput, +) +from beeai_framework.backend.chat import ChatModelInput, ChatModelOutput +from beeai_framework.backend.message import SystemMessage, UserMessage +from beeai_framework.emitter.emitter import Emitter, EventMeta +from beeai_framework.memory.base_memory import BaseMemory +from beeai_framework.memory.token_memory import TokenMemory +from beeai_framework.parsers.line_prefix import LinePrefixParser, Prefix +from beeai_framework.tools import ToolError, ToolInputValidationError +from beeai_framework.tools.tool import StringToolOutput, Tool, ToolOutput + + +class DefaultRunner(BaseRunner): + def default_templates(self) -> BeeAgentTemplates: + return BeeAgentTemplates( + system=SystemPromptTemplate, + assistant=AssistantPromptTemplate, + user=UserPromptTemplate, + tool_not_found_error=ToolNotFoundErrorTemplate, + tool_input_error=ToolInputErrorTemplate, + ) + + def create_parser(self) -> LinePrefixParser: + # TODO: implement transitions rules + # TODO Enforce set of prefix names + prefixes = [ + Prefix(name="thought", line_prefix="Thought: "), + Prefix(name="tool_name", line_prefix="Function Name: "), + Prefix(name="tool_input", line_prefix="Function Input: ", terminal=True), + Prefix(name="final_answer", line_prefix="Final Answer: ", terminal=True), + ] + return LinePrefixParser(prefixes) + + async def llm(self, input: BeeRunnerLLMInput) -> BeeAgentRunIteration: + state: dict[str, Any] = {} + parser = self.create_parser() + + async def new_token(value: tuple[ChatModelOutput, Callable], event: EventMeta) -> None: + data, abort = value + chunk = data.get_text_content() + + for result in parser.feed(chunk): + if result is not None: + state[result.prefix.name] = result.content + + if result.prefix.terminal: + abort() + + async def observe(llm_emitter: Emitter) -> None: + llm_emitter.on("newToken", new_token) + + output: ChatModelOutput = await self._input.llm.create( + 
ChatModelInput(messages=self.memory.messages[:], stream=True) + ).observe(fn=observe) + + # Pick up any remaining lines in parser buffer + for result in parser.finalize(): + if result is not None: + state[result.prefix.name] = result.content + + return BeeAgentRunIteration(raw=output, state=BeeIterationResult(**state)) + + async def tool(self, input: BeeRunnerToolInput) -> BeeRunnerToolResult: + tool: Tool | None = next( + ( + tool + for tool in self._input.tools + if tool.name.strip().upper() == (input.state.tool_name or "").strip().upper() + ), + None, + ) + + if tool is None: + self._failed_attempts_counter.use( + Exception(f"Agent was trying to use non-existent tool '{input.state.tool_name}'") + ) + + return BeeRunnerToolResult( + success=False, + output=StringToolOutput( + self.templates.tool_not_found_error.render( + { + "tools": self._input.tools, + } + ) + ), + ) + + try: + # tool_options = copy.copy(self._options) + # TODO Tool run is not async + # Convert tool input to dict + tool_input = json.loads(input.state.tool_input or "") + tool_output: ToolOutput = tool.run(tool_input, options={}) # TODO: pass tool options + return BeeRunnerToolResult(output=tool_output, success=True) + # TODO These error templates should be customized to help the LLM to recover + except ToolInputValidationError as e: + self._failed_attempts_counter.use(e) + return BeeRunnerToolResult( + success=False, + output=StringToolOutput(self.templates.tool_input_error.render({"reason": str(e)})), + ) + + except ToolError as e: + self._failed_attempts_counter.use(e) + + return BeeRunnerToolResult( + success=False, + output=StringToolOutput(self.templates.tool_input_error.render({"reason": str(e)})), + ) + except json.JSONDecodeError as e: + self._failed_attempts_counter.use(e) + return BeeRunnerToolResult( + success=False, + output=StringToolOutput(self.templates.tool_input_error.render({"reason": str(e)})), + ) + + async def init_memory(self, input: BeeRunInput) -> BaseMemory: + memory = TokenMemory( + capacity_threshold=0.85, sync_threshold=0.5, llm=self._input.llm + ) # TODO handlers need to be fixed + + tool_defs = [] + + for tool in self._input.tools: + tool_defs.append(ToolDefinition(**tool.prompt_data())) + + system_prompt: str = self.templates.system.render( + SystemPromptTemplateInput( + tools=tool_defs, + tools_length=len(tool_defs), # TODO Where do instructions come from + ) + ) + + messages = [ + SystemMessage(content=system_prompt), + *self._input.memory.messages, + ] + + if input.prompt: + messages.append(UserMessage(content=input.prompt)) + + if len(messages) <= 1: + raise ValueError("At least one message must be provided.") + + await memory.add_many(messages=messages) + + return memory diff --git a/python/beeai_framework/agents/runners/granite/prompts.py b/python/beeai_framework/agents/runners/granite/prompts.py new file mode 100644 index 00000000..e8098408 --- /dev/null +++ b/python/beeai_framework/agents/runners/granite/prompts.py @@ -0,0 +1,80 @@ +from datetime import datetime + +from beeai_framework.agents.runners.default.prompts import ( + AssistantPromptTemplateInput, + SystemPromptTemplateInput, + ToolInputErrorTemplateInput, + ToolNotFoundErrorTemplateInput, + UserPromptTemplateInput, +) +from beeai_framework.utils.templates import PromptTemplate + +GraniteUserPromptTemplate = PromptTemplate(schema=UserPromptTemplateInput, template="{{input}}") + +GraniteAssistantPromptTemplate = PromptTemplate( + schema=AssistantPromptTemplateInput, + template="{{#thought}}Thought: 
{{.}}\n{{/thought}}{{#tool_name}}Tool Name: {{.}}\n{{/tool_name}}{{#tool_input}}Tool Input: {{&.}}\n{{/tool_input}}{{#tool_output}}Tool Output: {{&.}}\n{{/tool_output}}{{#final_answer}}Final Answer: {{.}}{{/final_answer}}", # noqa: E501 +) + +GraniteSystemPromptTemplate = PromptTemplate( + schema=SystemPromptTemplateInput, + functions={ + "formatDate": lambda: datetime.now(tz=None).strftime("%A, %B %d, %Y at %I:%M:%S %p"), # noqa: DTZ005 + }, + template="""You are an AI assistant. +When the user sends a message, figure out a solution and provide a final answer. +{{#tools_length}} +You have access to a set of tools that can be used to retrieve information and perform actions. +Pay close attention to the tool description to determine if a tool is useful in a particular context. +{{/tools_length}} + +# Communication structure +You communicate only in instruction lines. Valid instruction lines are 'Thought' followed by 'Tool Name' and then 'Tool Input', or 'Thought' followed by 'Final Answer'. + +Line starting 'Thought: ' The assistant's response always starts with a thought; this is a single line where the assistant thinks about the user's message and describes in detail what it should do next. +{{#tools_length}} +In a 'Thought: ', the assistant should determine if a Tool Call is necessary to get more information or perform an action, or if the available information is sufficient to provide the Final Answer. +If a tool needs to be called and is available, the assistant will produce a tool call: +Line starting 'Tool Name: ' name of the tool that you want to use. +Line starting 'Tool Input: ' JSON formatted tool arguments adhering to the selected tool parameters schema, i.e. {"arg1":"value1", "arg2":"value2"}. +After a 'Tool Input: ' the next message will contain a tool response. The next output should be a 'Thought: ' where the assistant thinks about all the information it has available, and what it should do next (e.g. try the same tool with a different input, try a different tool, or proceed with answering the original user question). +{{/tools_length}} +Once enough information is available to provide the Final Answer, the last line in the message needs to be: +Line starting 'Final Answer: ' followed by a concise and clear answer to the original message. + +# Best practices +- Use markdown syntax for formatting code snippets, links, JSON, tables, images, files. +{{#tools_length}} +- Do not attempt to use a tool that is not listed in available tools. This will cause an error. +- Make sure that tool input is in the correct format and contains the correct arguments. +{{/tools_length}} +- When the message is unclear, respond with a line starting with 'Final Answer:' followed by a request for additional information needed to solve the problem. +- When the user wants to chitchat instead, always respond politely. + +# Date and Time +The current date and time is: {{formatDate}} +{{#tools_length}} +You do not need a tool to get the current Date and Time. Use the information available here. +{{/tools_length}} + +{{#instructions}} +# Additional instructions +{{.}} +{{/instructions}} +""", # noqa: E501 +) + +GraniteToolNotFoundErrorTemplate = PromptTemplate( + schema=ToolNotFoundErrorTemplateInput, + template="""The tool does not exist! 
+{{#tools.length}} +Use one of the following tools: {{#trim}}{{#tools}}{{name}},{{/tools}}{{/trim}} +{{/tools.length}}""", +) + +GraniteToolInputErrorTemplate = PromptTemplate( + schema=ToolInputErrorTemplateInput, + template="""{{reason}} + +HINT: If you're convinced that the input was correct but the tool cannot process it then use a different tool or say I don't know.""", # noqa: E501 +) diff --git a/python/beeai_framework/agents/runners/granite/runner.py b/python/beeai_framework/agents/runners/granite/runner.py new file mode 100644 index 00000000..b78ad960 --- /dev/null +++ b/python/beeai_framework/agents/runners/granite/runner.py @@ -0,0 +1,53 @@ +import json + +from beeai_framework.agents.runners.default.runner import DefaultRunner +from beeai_framework.agents.runners.granite.prompts import ( + GraniteAssistantPromptTemplate, + GraniteSystemPromptTemplate, + GraniteToolInputErrorTemplate, + GraniteToolNotFoundErrorTemplate, + GraniteUserPromptTemplate, +) +from beeai_framework.agents.types import BeeAgentTemplates, BeeRunInput +from beeai_framework.backend.message import Message, MessageInput +from beeai_framework.memory.base_memory import BaseMemory +from beeai_framework.parsers.line_prefix import LinePrefixParser, Prefix + + +class GraniteRunner(DefaultRunner): + def create_parser(self) -> LinePrefixParser: + """Prefixes are renamed for granite""" + prefixes = [ + Prefix(name="thought", line_prefix="Thought: "), + Prefix(name="tool_name", line_prefix="Tool Name: "), + Prefix(name="tool_input", line_prefix="Tool Input: ", terminal=True), + Prefix(name="final_answer", line_prefix="Final Answer: ", terminal=True), + ] + return LinePrefixParser(prefixes) + + def default_templates(self) -> BeeAgentTemplates: + return BeeAgentTemplates( + system=GraniteSystemPromptTemplate, + assistant=GraniteAssistantPromptTemplate, + user=GraniteUserPromptTemplate, + tool_not_found_error=GraniteToolNotFoundErrorTemplate, + tool_input_error=GraniteToolInputErrorTemplate, + ) + + async def init_memory(self, input: BeeRunInput) -> BaseMemory: + """Insert tool message after the system prompt""" + memory = await super().init_memory(input) + # insert tools + + if self._input.tools and len(self._input.tools) > 0: + memory.messages.insert( + 1, + Message.of( + MessageInput( + role="available_tools", + text="\n".join(json.dumps(tool.prompt_data(), indent=4) for tool in self._input.tools), + ).model_dump() + ), + ) + + return memory diff --git a/python/beeai_framework/agents/types.py b/python/beeai_framework/agents/types.py new file mode 100644 index 00000000..1094132c --- /dev/null +++ b/python/beeai_framework/agents/types.py @@ -0,0 +1,75 @@ +from pydantic import BaseModel, InstanceOf + +from beeai_framework.backend import Message +from beeai_framework.backend.chat import ChatModel, ChatModelOutput +from beeai_framework.cancellation import AbortSignal +from beeai_framework.memory.base_memory import BaseMemory +from beeai_framework.tools.tool import Tool +from beeai_framework.utils.templates import PromptTemplate + + +class BeeRunInput(BaseModel): + prompt: str | None = None + + +class BeeMeta(BaseModel): + iteration: int + + +class BeeAgentExecutionConfig(BaseModel): + total_max_retries: int | None = None + max_retries_per_step: int | None = None + max_iterations: int | None = None + + +class BeeRunOptions(BaseModel): + signal: AbortSignal | None = None + execution: BeeAgentExecutionConfig | None = None + + +class BeeIterationResult(BaseModel): + thought: str | None = None + tool_name: str | None = None + 
tool_input: str | None = None + tool_output: str | None = None + final_answer: str | None = None + + +class BeeAgentRunIteration(BaseModel): + raw: InstanceOf[ChatModelOutput] + state: BeeIterationResult + + +class BeeRunOutput(BaseModel): + result: InstanceOf[Message] + iterations: list[BeeAgentRunIteration] + memory: InstanceOf[BaseMemory] + + +class BeeAgentTemplates(BaseModel): + system: InstanceOf[PromptTemplate] # TODO proper template subtypes + assistant: InstanceOf[PromptTemplate] + user: InstanceOf[PromptTemplate] + # user_empty: InstanceOf[PromptTemplate] + # tool_error: InstanceOf[PromptTemplate] + tool_input_error: InstanceOf[PromptTemplate] + # tool_no_result_error: InstanceOf[PromptTemplate] + tool_not_found_error: InstanceOf[PromptTemplate] + # schema_error: InstanceOf[PromptTemplate] + + +class AgentMeta(BaseModel): + name: str + description: str + tools: list[InstanceOf[Tool]] + extra_description: str | None = None + + +class BeeInput(BaseModel): + llm: InstanceOf[ChatModel] + tools: list[InstanceOf[Tool]] # TODO AnyTool? + memory: InstanceOf[BaseMemory] + meta: InstanceOf[AgentMeta] | None = None + templates: InstanceOf[BeeAgentTemplates] | None = None + execution: BeeAgentExecutionConfig | None = None + stream: bool | None = None diff --git a/python/beeai_framework/backend/__init__.py b/python/beeai_framework/backend/__init__.py new file mode 100644 index 00000000..d7047797 --- /dev/null +++ b/python/beeai_framework/backend/__init__.py @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.backend.errors import BackendError, ChatModelError, MessageError +from beeai_framework.backend.message import ( + AssistantMessage, + CustomMessage, + Message, + Role, + SystemMessage, + ToolMessage, + UserMessage, +) + +__all__ = [ + "AssistantMessage", + "BackendError", + "ChatModelError", + "CustomMessage", + "Message", + "MessageError", + "Role", + "SystemMessage", + "ToolMessage", + "UserMessage", +] diff --git a/python/beeai_framework/backend/chat.py b/python/beeai_framework/backend/chat.py new file mode 100644 index 00000000..dd1176f3 --- /dev/null +++ b/python/beeai_framework/backend/chat.py @@ -0,0 +1,266 @@ +# SPDX-License-Identifier: Apache-2.0 + +import json +from abc import ABC, abstractmethod +from collections.abc import AsyncGenerator, Callable +from typing import Annotated, Any, Literal, Self, TypeVar + +from pydantic import BaseModel, BeforeValidator + +from beeai_framework.backend.constants import ProviderName +from beeai_framework.backend.errors import ChatModelError +from beeai_framework.backend.message import AssistantMessage, Message, SystemMessage +from beeai_framework.backend.utils import load_model, parse_broken_json, parse_model +from beeai_framework.cancellation import AbortController, AbortSignal +from beeai_framework.context import Run, RunContext, RunContextInput, RunInstance +from beeai_framework.emitter import Emitter, EmitterInput +from beeai_framework.tools.tool import Tool +from beeai_framework.utils.custom_logger import BeeLogger +from beeai_framework.utils.models import ModelLike, to_model +from beeai_framework.utils.templates import PromptTemplate + +T = TypeVar("T", bound=BaseModel) +ChatModelFinishReason = Literal["stop", "length", "function_call", "content_filter", "null"] +logger = BeeLogger(__name__) + + +def message_validator(messages: list[Message]) -> list[Message]: + if len(messages) and not isinstance(messages[0], Message): + raise ValueError("incoming data must be a Message") + return 
messages + + +def tool_validator(tools: list[Tool]) -> list[Tool]: + if len(tools) and not isinstance(tools[0], Tool): + raise ValueError("incoming data must be a Tool") + return tools + + +class ChatModelParameters(BaseModel): + max_tokens: int | None = None + top_p: float | None = None + frequency_penalty: float | None = None + temperature: float | None = None + top_k: int | None = None + n: int | None = None + presence_penalty: float | None = None + seed: int | None = None + stop_sequences: list[str] | None = None + stream: bool | None = None + + +class ChatConfig(BaseModel): + # TODO: cache: ChatModelCache | Callable[[ChatModelCache], ChatModelCache] | None = None + parameters: ChatModelParameters | Callable[[ChatModelParameters], ChatModelParameters] | None = None + + +class ChatModelStructureInput(BaseModel): + schema: type[T] + messages: Annotated[list, BeforeValidator(message_validator)] + abort_signal: AbortSignal | None = None + max_retries: int | None = None + + +class ChatModelStructureOutput(BaseModel): + object: type[T] | dict[str, Any] + + +class ChatModelInput(ChatModelParameters): + tools: Annotated[list, BeforeValidator(tool_validator)] | None = None + abort_signal: AbortSignal | None = None + stop_sequences: list[str] | None = None + response_format: dict[str, Any] | type[BaseModel] | None = None + # tool_choice: NoneType # TODO + messages: Annotated[list, BeforeValidator(message_validator)] + + +class ChatModelUsage(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class ChatModelOutput: + def __init__( + self, + *, + messages: list[Message], + usage: ChatModelUsage | None = None, + finish_reason: str | None = None, + ) -> None: + self.messages = messages + self.usage = usage + self.finish_reason = finish_reason + + @classmethod + def from_chunks(cls, chunks: list) -> Self: + final = cls(messages=[]) + for cur in chunks: + final.merge(cur) + return final + + def merge(self, other: Self) -> None: + self.messages.extend(other.messages) + self.finish_reason = other.finish_reason + if self.usage and other.usage: + merged_usage = self.usage.model_copy() + if other.usage.total_tokens: + merged_usage.total_tokens = max(self.usage.total_tokens, other.usage.total_tokens) + merged_usage.prompt_tokens = max(self.usage.prompt_tokens, other.usage.prompt_tokens) + merged_usage.completion_tokens = max(self.usage.completion_tokens, other.usage.completion_tokens) + self.usage = merged_usage + elif other.usage: + self.usage = other.usage.model_copy() + + def get_text_content(self) -> str: + return "".join([x.text for x in list(filter(lambda x: isinstance(x, AssistantMessage), self.messages))]) + + +class ChatModel(ABC): + emitter: Emitter + parameters: ChatModelParameters | None = None + + @property + @abstractmethod + def model_id(self) -> str: + return self._model_id + + @property + @abstractmethod + def provider_id(self) -> str: + return self._provider_id + + def __init__(self) -> None: + self.emitter = Emitter.root().child( + EmitterInput( + namespace=["backend", self.provider_id, "chat"], + creator=self, + ) + ) + + @abstractmethod + async def _create( + self, + input: ChatModelInput, + run: RunContext, + ) -> ChatModelOutput: + raise NotImplementedError + + @abstractmethod + def _create_stream( + self, + input: ChatModelInput, + run: RunContext, + ) -> AsyncGenerator[ChatModelOutput]: + raise NotImplementedError + + @abstractmethod + async def _create_structure( + self, + input: ChatModelStructureInput, + run: RunContext, + ) -> ChatModelStructureOutput: 
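+ # Provider-agnostic default: render the target schema into a JSON-only system + # instruction, run a plain completion, and loosely parse the reply via + # parse_broken_json; adapters with native structured output override this method. + # A hedged call sketch, with a hypothetical schema and model instance: + # + #     class City(BaseModel): + #         name: str + #         country: str + # + #     output = await model.create_structure({"schema": City, "messages": [UserMessage("Name a city in France.")]}) + #     print(output.object)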
schema = input.schema + + json_schema = schema.model_json_schema(mode="serialization") if issubclass(schema, BaseModel) else schema + + class DefaultChatModelStructureSchema(BaseModel): + schema: str + + system_template = PromptTemplate( + schema=DefaultChatModelStructureSchema, + template=( + """You are a helpful assistant that generates only valid JSON """ + """adhering to the following JSON Schema. +``` +{{schema}} +``` +IMPORTANT: You MUST answer with a JSON object that matches the JSON schema above.""" + ), + ) + + input_messages = input.messages + messages: list[Message] = [ + SystemMessage(system_template.render({"schema": json.dumps(json_schema)})), + *input_messages, + ] + + response = await self._create( + ChatModelInput(messages=messages, response_format={"type": "object-json"}, abort_signal=input.abort_signal), + run, + ) + + logger.trace(f"Received structured response:\n{response}") + + text_response = response.get_text_content() + result = parse_broken_json(text_response) + # TODO: validate result matches expected schema + return ChatModelStructureOutput(object=result) + + def create(self, chat_model_input: ModelLike[ChatModelInput]) -> Run: + input = to_model(ChatModelInput, chat_model_input) + + async def run_create(context: RunContext) -> ChatModelOutput: + try: + await context.emitter.emit("start", input) + chunks: list[ChatModelOutput] = [] + + if input.stream: + abort_controller: AbortController = AbortController() + generator = self._create_stream(input, context) + async for value in generator: + chunks.append(value) + await context.emitter.emit("newToken", (value, lambda: abort_controller.abort())) + if abort_controller.signal.aborted: + break + + result = ChatModelOutput.from_chunks(chunks) + else: + result = await self._create(input, context) + + await context.emitter.emit("success", {"value": result}) + return result + except ChatModelError as error: + await context.emitter.emit("error", {"input": input, "error": error}) + raise error + except Exception as ex: + await context.emitter.emit("error", {"input": input, "error": ex}) + raise ChatModelError("Model error has occurred.") from ex + finally: + await context.emitter.emit("finish", None) + + return RunContext.enter( + RunInstance(emitter=self.emitter), + RunContextInput(params=[input], signal=input.abort_signal), + run_create, + ) + + def create_structure(self, structure_input: ModelLike[ChatModelStructureInput]) -> Run: + input = to_model(ChatModelStructureInput, structure_input) + + async def run_structure(context: RunContext) -> ChatModelStructureOutput: + return await self._create_structure(input, context) + + return RunContext.enter( + RunInstance(emitter=self.emitter), + RunContextInput(params=[input], signal=input.abort_signal), + run_structure, + ) + + def config(self, chat_config: ChatConfig) -> None: + # TODO: uncomment when cache is supported/implemented + # if chat_config.cache: + # self.cache = chat_config.cache(self.cache) if callable(chat_config.cache) else chat_config.cache + + if chat_config.parameters: + self.parameters = ( + chat_config.parameters(self.parameters) if callable(chat_config.parameters) else chat_config.parameters + ) + + @staticmethod + async def from_name(name: str | ProviderName, options: ModelLike[ChatModelParameters] | None = None) -> "ChatModel": + parsed_model = parse_model(name) + TargetChatModel = await load_model(parsed_model.provider_id, "chat") # noqa: N806 + + settings = options.model_dump() if isinstance(options, ChatModelParameters) else options + + return TargetChatModel(parsed_model.model_id, **(settings 
or {})) diff --git a/python/beeai_framework/backend/constants.py b/python/beeai_framework/backend/constants.py new file mode 100644 index 00000000..1d81a765 --- /dev/null +++ b/python/beeai_framework/backend/constants.py @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: Apache-2.0 + + +from typing import Literal + +from pydantic import BaseModel + +ProviderName = Literal["ollama", "openai", "watsonx"] +ProviderHumanName = Literal["Ollama", "OpenAI", "watsonx"] + + +class ProviderDef(BaseModel): + name: ProviderHumanName + module: ProviderName + aliases: list[str] + + +class ProviderModelDef(BaseModel): + provider_id: str + model_id: str | None = None + provider_def: ProviderDef + + +BackendProviders = { + "Ollama": ProviderDef(name="Ollama", module="ollama", aliases=[]), + "OpenAI": ProviderDef(name="OpenAI", module="openai", aliases=["openai"]), + "watsonx": ProviderDef(name="watsonx", module="watsonx", aliases=["watsonx", "ibm"]), +} diff --git a/python/beeai_framework/backend/errors.py b/python/beeai_framework/backend/errors.py new file mode 100644 index 00000000..bea14d05 --- /dev/null +++ b/python/beeai_framework/backend/errors.py @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.errors import FrameworkError + + +class BackendError(FrameworkError): + def __init__( + self, + message: str = "Backend error", + *, + is_fatal: bool = True, + is_retryable: bool = False, + cause: Exception | None = None, + ) -> None: + super().__init__(message, is_fatal=is_fatal, is_retryable=is_retryable, cause=cause) + + +class ChatModelError(BackendError): + def __init__(self, message: str = "Chat Model error", *, cause: Exception | None = None) -> None: + super().__init__(message, is_fatal=True, is_retryable=False, cause=cause) + + +class MessageError(FrameworkError): + def __init__(self, message: str = "Message Error", *, cause: Exception | None = None) -> None: + super().__init__(message, is_fatal=True, is_retryable=False, cause=cause) diff --git a/python/beeai_framework/backend/message.py b/python/beeai_framework/backend/message.py new file mode 100644 index 00000000..d82ad5e0 --- /dev/null +++ b/python/beeai_framework/backend/message.py @@ -0,0 +1,141 @@ +# SPDX-License-Identifier: Apache-2.0 + +import json +from abc import abstractmethod +from datetime import UTC, datetime +from enum import Enum +from typing import Any, Generic, Literal, TypeVar + +from pydantic import BaseModel, Field + +from beeai_framework.backend import MessageError + +T = TypeVar("T", bound=BaseModel) +MessageMeta = dict[str, Any] + + +class Role(str, Enum): + ASSISTANT: str = "assistant" + SYSTEM: str = "system" + TOOL: str = "tool" + USER: str = "user" + + def __str__(self) -> str: + return self.value + + @classmethod + def values(cls) -> set[str]: + return {value for key, value in vars(cls).items() if not key.startswith("_") and isinstance(value, str)} + + +class ToolResult(BaseModel): + type: Literal["tool-result"] + result: Any + tool_name: str = Field(alias="toolName") + tool_call_id: str = Field(alias="toolCallId") + + +class MessageInput(BaseModel): + role: Role | str + text: str + meta: MessageMeta | None = None + + +class Message(Generic[T]): + role: Role | str + content: T + + def __init__(self, content: T | list[T], meta: MessageMeta | None = None) -> None: + if meta and not meta.get("createdAt"): + meta["createdAt"] = datetime.now(tz=UTC) + + if isinstance(content, str): + self.content = [self.from_string(text=content)] + elif isinstance(content, list): + self.content = content + else: + 
self.content = [content] + + @property + def role(self) -> Role | str: + return self._role + + @property + def text(self) -> str: + return "".join([x.get("text") for x in self.get_texts()]) + + @abstractmethod + def from_string(self, text: str) -> T: + pass + + def get_texts(self) -> list[T]: + return list(filter(lambda x: x.get("type") == "text", self.content)) + + def to_plain(self) -> dict[str, Any]: + return {"role": self.role, "content": self.text} + + @classmethod + def of(cls, message_data: dict[str, str]) -> "Message": + message_input = MessageInput.model_validate(message_data, strict=True) + if message_input.role == Role.USER: + return UserMessage(message_input.text, message_input.meta) + elif message_input.role == Role.ASSISTANT: + return AssistantMessage(message_input.text, message_input.meta) + elif message_input.role == Role.SYSTEM: + return SystemMessage(message_input.text, message_input.meta) + elif message_input.role == Role.TOOL: + return ToolMessage(message_input.text, message_input.meta) + else: + return CustomMessage(message_input.role, message_input.text, message_input.meta) + + +class AssistantMessage(Message): + _role = Role.ASSISTANT + + def from_string(self, text: str) -> T: + return {"type": "text", "text": text} + + def get_tool_calls(self) -> list[T]: + return filter(lambda x: x.get("type") == "tool-call", self.content) + + +class ToolMessage(Message): + _role = Role.TOOL + + def from_string(self, text: str) -> ToolResult: + tool_result = ToolResult.model_validate(json.loads(text)) + return tool_result.model_dump(by_alias=True) + + def get_tool_results(self) -> list[T]: + return filter(lambda x: x.get("type") == "tool-result", self.content) + + +class SystemMessage(Message): + _role = Role.SYSTEM + + def from_string(self, text: str) -> T: + return {"type": "text", "text": text} + + +class UserMessage(Message): + _role = Role.USER + + def from_string(self, text: str) -> T: + return {"type": "text", "text": text} + + def get_images(self) -> list[T]: + return filter(lambda x: x.get("type") == "image", self.content) + + def get_files(self) -> list[T]: + return filter(lambda x: x.get("type") == "file", self.content) + + +class CustomMessage(Message): + def __init__(self, role: str, content: T, meta: MessageMeta = None) -> None: + super().__init__(content, meta) + if not role: + raise MessageError("Role must be specified!") + self._role = role + + def from_string(self, text: str) -> dict[str, Any]: + return {"type": "text", "text": text} diff --git a/python/beeai_framework/backend/utils.py b/python/beeai_framework/backend/utils.py new file mode 100644 index 00000000..5cbc84bb --- /dev/null +++ b/python/beeai_framework/backend/utils.py @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: Apache-2.0 + +import json +from importlib import import_module +from typing import Any, Literal, TypeVar + +from beeai_framework.backend.constants import ( + BackendProviders, + ProviderDef, + ProviderModelDef, + ProviderName, +) +from beeai_framework.backend.errors import BackendError + +T = TypeVar("T") + +# TODO: `${ProviderName}:${string}` +FullModelName: str + + +def find_provider_def(value: str) -> ProviderDef | None: + for provider in BackendProviders.values(): + if value == provider.name or value == provider.module or value in provider.aliases: + return provider + return None + + +def parse_model(name: str) -> ProviderModelDef: + if not name: + raise BackendError("Neither 'provider' nor 'provider:model' was specified.") + + # provider_id:model_id + # e.g., ollama:llama3.1 + # keep 
remainder of string intact (maxsplit=1) because model name can also have colons + name_parts = name.split(":", maxsplit=1) + provider_def = find_provider_def(name_parts[0]) + + if not provider_def: + raise BackendError("Model does not contain provider name!") + + return ProviderModelDef( + provider_id=name_parts[0], + model_id=name_parts[1] if len(name_parts) > 1 else None, + provider_def=provider_def, + ) + + +async def load_model(name: ProviderName | str, model_type: Literal["embedding", "chat"] = "chat") -> type[T]: + parsed = parse_model(name) + provider_def = parsed.provider_def + + # TODO: rename `beeai` to `beeai_framework` + module_path = f"beeai_framework.adapters.{provider_def.module}.backend.{model_type}" + module = import_module(module_path) + + class_name = f"{provider_def.name.capitalize()}{model_type.capitalize()}Model" + return getattr(module, class_name) + + +def parse_broken_json(input: str, options: dict | None = None) -> dict[str, Any]: + input = (input or "").strip() + try: + return json.loads(input) + except Exception as ex: + # TODO: handle parsing errors + raise BackendError("Failed to parse JSON") from ex diff --git a/python/beeai_framework/cancellation.py b/python/beeai_framework/cancellation.py new file mode 100644 index 00000000..539ca023 --- /dev/null +++ b/python/beeai_framework/cancellation.py @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: Apache-2.0 + +import contextlib +import threading +from collections.abc import Callable + +from pydantic import BaseModel + +from beeai_framework.utils.custom_logger import BeeLogger + +logger = BeeLogger(__name__) + + +class AbortSignal(BaseModel): + def __init__(self) -> None: + super().__init__() + self._aborted = False + self._reason: str | None = None + self._listeners: list[Callable] = [] + + @property + def aborted(self) -> bool: + return self._aborted + + @property + def reason(self) -> str: + return self._reason + + def add_event_listener(self, callback: Callable[[], None]) -> None: + self._listeners.append(callback) + + def remove_event_listener(self, callback: Callable[[], None]) -> None: + with contextlib.suppress(ValueError): + self._listeners.remove(callback) + + def _abort(self, reason: str | None = None) -> None: + self._aborted = True + self._reason = reason + for callback in self._listeners: + if callback: + callback() + + @classmethod + def timeout(cls, duration: int) -> "AbortSignal": + signal = cls() + + def _callback() -> None: + signal._timer.cancel() + signal._abort(f"Operation timed out after {duration} ms") + + signal._timer = threading.Timer(duration * 1.0, _callback) + signal._timer.start() + + return signal + + +class AbortController: + def __init__(self) -> None: + self._signal = AbortSignal() + + @property + def signal(self) -> AbortSignal: + return self._signal + + def abort(self, reason: str | None = None) -> None: + self._signal._abort(reason) + + +def register_signals(controller: AbortController, signals: list[AbortSignal]) -> None: + def trigger_abort(reason: str | None = None) -> None: + controller.abort(reason) + + for signal in filter(lambda x: x is not None, signals): + if signal.aborted: + trigger_abort(signal.reason) + signal.add_event_listener(trigger_abort) diff --git a/python/beeai_framework/context.py b/python/beeai_framework/context.py new file mode 100644 index 00000000..b0520cd6 --- /dev/null +++ b/python/beeai_framework/context.py @@ -0,0 +1,160 @@ +# SPDX-License-Identifier: Apache-2.0 + +import asyncio +import uuid +from collections.abc import Awaitable, Callable +from 
contextvars import ContextVar +from dataclasses import dataclass +from datetime import UTC, datetime +from typing import Any, Self, TypeVar + +from pydantic import BaseModel + +from beeai_framework.cancellation import AbortController, AbortSignal, register_signals +from beeai_framework.emitter import Emitter, EmitterInput, EventTrace +from beeai_framework.errors import FrameworkError +from beeai_framework.utils.custom_logger import BeeLogger + +R = TypeVar("R") +GetRunContext = TypeVar("GetRunContext", bound="RunContext") + +logger = BeeLogger(__name__) + + +@dataclass +class RunInstance: + emitter: Emitter + + +class RunContextInput(BaseModel): + params: Any + signal: AbortSignal | None = None + + +class Run: + def __init__(self, handler: Callable[[], R], context: GetRunContext) -> None: + super().__init__() + self.handler = handler + self.tasks: list[tuple[Callable[[Emitter], Awaitable[None]], Any]] = [] + self.run_context = context + + def __await__(self) -> R: + return self._run_tasks().__await__() + + def observe(self, fn: Callable[[Emitter], Awaitable[None]]) -> Self: + # self.tasks.append(lambda: fn(self.context.emitter)) + self.tasks.append((fn, self.run_context.emitter)) + return self + + def context(self, context: GetRunContext) -> Self: + # self.tasks.append(lambda: self._set_context(context)) + self.tasks.append((self._set_context, context)) + return self + + def middleware(self, fn: Callable[[GetRunContext], None]) -> Self: + # self.tasks.append(lambda: fn(self.context)) + self.tasks.append((fn, self.run_context)) + return self + + async def _run_tasks(self) -> R: + for fn, param in self.tasks: + await fn(param) + self.tasks.clear() + return await self.handler() + + def _set_context(self, context: GetRunContext) -> None: + self.context.context = context + self.context.emitter.context = context + + +class RunContext(RunInstance): + storage: ContextVar[Self] = ContextVar("storage", default=None) + + def __init__( + self, *, instance: RunInstance, context_input: RunContextInput, parent: GetRunContext | None = None + ) -> None: + self.instance = instance + self.context_input = context_input + self.created_at = datetime.now(tz=UTC) + self.run_params = context_input.params + self.run_id = str(uuid.uuid4()) + self.parent_id = parent.run_id if parent else None + self.group_id = parent.group_id if parent else str(uuid.uuid4()) + self.context = {k: v for k, v in parent.context.items() if k not in ["id", "parentId"]} if parent else {} + + self.emitter = self.instance.emitter.child( + EmitterInput( + context=self.context, + trace=EventTrace( + id=self.group_id, + run_id=self.run_id, + parent_run_id=parent.run_id if parent else None, + ), + ) + ) + + if parent: + self.emitter.pipe(parent.emitter) + + self.controller = AbortController() + parent_signals = [context_input.signal] if parent else [] + register_signals(self.controller, [context_input.signal, *parent_signals]) + + @property + def signal(self) -> AbortSignal: + return self.controller.signal + + def destroy(self) -> None: + self.emitter.destroy() + self.controller.abort("Context destroyed.") + + @staticmethod + def enter(instance: RunInstance, context_input: RunContextInput, fn: Callable[[GetRunContext], R]) -> Run: + parent = RunContext.storage.get() + context = RunContext(instance=instance, context_input=context_input, parent=parent) + + async def handler() -> R: + emitter = context.emitter.child( + EmitterInput(namespace=["run"], creator=context, context={"internal": True}) + ) + + try: + await emitter.emit("start", None) + + 
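+ # The run body below races against the abort signal as two asyncio tasks: + # the first task to complete supplies the result, after which the abort + # watcher is cancelled so its pending future does not leak.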
async def _context_storage_run() -> R: + RunContext.storage.set(context) + return await fn(context) + + async def _context_signal_aborted() -> str: + cancel_future = asyncio.get_event_loop().create_future() + + def _on_abort() -> None: + if not cancel_future.done() and not cancel_future.cancelled(): + cancel_future.set_result(context.signal.reason) + + context.signal.add_event_listener(_on_abort) + await cancel_future + + abort_task = asyncio.create_task( + _context_signal_aborted(), + name="abort-task", + ) + runner_task = asyncio.create_task(_context_storage_run(), name="run-task") + + result = None + for first_done in asyncio.as_completed([abort_task, runner_task]): + result = await first_done + abort_task.cancel() + break + + await emitter.emit("success", result) + return result + except Exception as e: + error = FrameworkError.ensure(e) + await emitter.emit("error", error) + raise + finally: + await emitter.emit("finish", None) + context.destroy() + + return Run(handler, context) diff --git a/python/beeai_framework/emitter/__init__.py b/python/beeai_framework/emitter/__init__.py new file mode 100644 index 00000000..e2beb309 --- /dev/null +++ b/python/beeai_framework/emitter/__init__.py @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.emitter.emitter import ( + Callback, + CleanupFn, + Emitter, + EventMeta, + Listener, + Matcher, + MatcherFn, +) +from beeai_framework.emitter.errors import EmitterError +from beeai_framework.emitter.types import EmitterInput, EmitterOptions, EventTrace + +__all__ = [ + "Callback", + "CleanupFn", + "Emitter", + "EmitterError", + "EmitterInput", + "EmitterOptions", + "EventMeta", + "EventTrace", + "Listener", + "Matcher", + "MatcherFn", +] diff --git a/python/beeai_framework/emitter/emitter.py b/python/beeai_framework/emitter/emitter.py new file mode 100644 index 00000000..a04bb340 --- /dev/null +++ b/python/beeai_framework/emitter/emitter.py @@ -0,0 +1,192 @@ +# SPDX-License-Identifier: Apache-2.0 + +import asyncio +import copy +import functools +import uuid +from collections.abc import Callable +from datetime import UTC, datetime +from typing import Any, Generic, TypeVar + +from pydantic import BaseModel, InstanceOf + +from beeai_framework.emitter.errors import EmitterError +from beeai_framework.emitter.types import EmitterInput, EmitterOptions, EventTrace +from beeai_framework.emitter.utils import ( + assert_valid_name, + assert_valid_namespace, +) + +T = TypeVar("T", bound=BaseModel) + +MatcherFn = Callable[["EventMeta"], bool] +Matcher = str | MatcherFn +Callback = Callable[[Any, "EventMeta"], Any] +CleanupFn = Callable[[], None] + + +class Listener(BaseModel, frozen=True): + match: MatcherFn + raw: Matcher + callback: Callback + options: InstanceOf[EmitterOptions] | None = None + + +class EventMeta(BaseModel): + id: str + name: str + path: str + created_at: datetime + source: InstanceOf["Emitter"] + creator: object + context: object + group_id: str | None = None + trace: InstanceOf[EventTrace] | None = None + + +class Emitter(Generic[T]): + def __init__(self, emitter_input: EmitterInput | None = None) -> None: + super().__init__() + + self.listeners: set[Listener] = set() + self.group_id: str | None = None + self.namespace: list[str] = [] + self.creator: object | None = None + self.context: object = {} + self.trace: EventTrace | None = None + self.cleanups: list[CleanupFn] = [] + + if emitter_input: + self.group_id = emitter_input.group_id + self.namespace = emitter_input.namespace if emitter_input.namespace else [] + 
self.creator = emitter_input.creator if emitter_input.creator else object() + self.context = emitter_input.context if emitter_input.context else {} + self.trace = emitter_input.trace + + assert_valid_namespace(self.namespace) + + @staticmethod + @functools.cache + def root() -> "Emitter": + return Emitter(EmitterInput(creator=object())) + + def child(self, emitter_input: EmitterInput | None = None) -> "Emitter": + if emitter_input is None: + emitter_input = EmitterInput() + + child_emitter = Emitter( + EmitterInput( + trace=emitter_input.trace if emitter_input.trace is not None else self.trace, + group_id=emitter_input.group_id if emitter_input.group_id is not None else self.group_id, + context={**self.context, **(emitter_input.context if emitter_input.context else {})}, + creator=emitter_input.creator if emitter_input.creator is not None else self.creator, + namespace=emitter_input.namespace + self.namespace if emitter_input.namespace else self.namespace[:], + ) + ) + + cleanup = child_emitter.pipe(self) + self.cleanups.append(cleanup) + + return child_emitter + + def pipe(self, target: "Emitter") -> CleanupFn: + return self.on( + "*.*", + target.invoke, + EmitterOptions( + is_blocking=True, + once=False, + persistent=True, + ), + ) + + def destroy(self) -> None: + self.listeners.clear() + for cleanup in self.cleanups: + cleanup() + self.cleanups.clear() + + def on(self, event: str, callback: Callback, options: EmitterOptions | None = None) -> CleanupFn: + return self.match(event, callback, options) + + def match(self, matcher: Matcher, callback: Callback, options: EmitterOptions | None = None) -> CleanupFn: + def create_matcher() -> MatcherFn: + matchers: list[MatcherFn] = [] + match_nested = options.match_nested if options else False + + if matcher == "*": + match_nested = False if match_nested is None else match_nested + matchers.append(lambda event: event.path == ".".join([*self.namespace, event.name])) + elif matcher == "*.*": + match_nested = True if match_nested is None else match_nested + matchers.append(lambda _: True) + # TODO is_valid_regex matches on all strings, not just regex patterns + # elif is_valid_regex(matcher): + # match_nested = True if match_nested is None else match_nested + # matchers.append(lambda event: bool(re.search(matcher, event.path))) + elif isinstance(matcher, Callable): + match_nested = False if match_nested is None else match_nested + matchers.append(matcher) + elif isinstance(matcher, str): + if "." 
in matcher:
+                    match_nested = True if match_nested is None else match_nested
+                    matchers.append(lambda event: event.path == matcher)
+                else:
+                    match_nested = False if match_nested is None else match_nested
+                    matchers.append(
+                        lambda event: event.name == matcher and event.path == ".".join([*self.namespace, event.name])
+                    )
+            else:
+                raise EmitterError("Invalid matcher provided!")
+
+            if not match_nested:
+
+                def match_same_run(event: EventMeta) -> bool:
+                    return self.trace is None or event.trace is None or self.trace.run_id == event.trace.run_id
+
+                matchers.insert(0, match_same_run)
+
+            return lambda event: all(match_fn(event) for match_fn in matchers)
+
+        listener = Listener(match=create_matcher(), raw=matcher, callback=callback, options=options)
+        self.listeners.add(listener)
+
+        # discard() instead of remove(): a "once" listener may already be gone.
+        return lambda: self.listeners.discard(listener)
+
+    async def emit(self, name: str, value: Any) -> None:
+        assert_valid_name(name)
+
+        event = self.create_event(name)
+        await self.invoke(value, event)
+
+    async def invoke(self, data: Any, event: EventMeta) -> None:
+        executions = []
+        # Iterate over a copy: "once" listeners are removed mid-loop, and
+        # mutating a set while iterating it raises RuntimeError.
+        for listener in list(self.listeners):
+            if not listener.match(event):
+                continue
+
+            if listener.options and listener.options.once:
+                self.listeners.discard(listener)
+
+            async def run(ln: Listener = listener) -> Any:
+                result = ln.callback(data, event)
+                # Callbacks may be plain functions or coroutine functions.
+                if asyncio.iscoroutine(result):
+                    return await result
+                return result
+
+            if listener.options and listener.options.is_blocking:
+                executions.append(run())
+            else:
+                executions.append(asyncio.create_task(run()))
+
+        await asyncio.gather(*executions)
+
+    def create_event(self, name: str) -> EventMeta:
+        return EventMeta(
+            id=str(uuid.uuid4()),
+            group_id=self.group_id,
+            name=name,
+            path=".".join([*self.namespace, name]),
+            created_at=datetime.now(tz=UTC),
+            source=self,
+            creator=self.creator,
+            context={**self.context},
+            trace=copy.copy(self.trace),
+        )
diff --git a/python/beeai_framework/emitter/errors.py b/python/beeai_framework/emitter/errors.py
new file mode 100644
index 00000000..c118931a
--- /dev/null
+++ b/python/beeai_framework/emitter/errors.py
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from beeai_framework.errors import FrameworkError
+
+
+class EmitterError(FrameworkError):
+    """Raised for errors caused by emitters."""
+
+    def __init__(self, message: str = "Emitter error", *, cause: Exception | None = None) -> None:
+        super().__init__(message, is_fatal=True, is_retryable=False, cause=cause)
diff --git a/python/beeai_framework/emitter/types.py b/python/beeai_framework/emitter/types.py
new file mode 100644
index 00000000..78bd8083
--- /dev/null
+++ b/python/beeai_framework/emitter/types.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from pydantic import BaseModel, InstanceOf
+
+
+class EventTrace(BaseModel):
+    id: str
+    run_id: str
+    parent_run_id: str | None = None
+
+
+class EmitterInput(BaseModel):
+    group_id: str | None = None
+    namespace: list[str] | None = None
+    creator: object | None = None
+    context: object | None = None
+    trace: InstanceOf[EventTrace] | None = None
+
+
+class EmitterOptions(BaseModel, frozen=True):
+    is_blocking: bool | None = None
+    once: bool | None = None
+    persistent: bool | None = None
+    match_nested: bool | None = None
diff --git a/python/beeai_framework/emitter/utils.py b/python/beeai_framework/emitter/utils.py
new file mode 100644
index 00000000..dd9b57e5
--- /dev/null
+++ b/python/beeai_framework/emitter/utils.py
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import re
+
+from beeai_framework.emitter.errors import EmitterError
+
+
+def assert_valid_name(name: str) -> None:
+    if not name or 
not re.match("^[a-zA-Z0-9_]+$", name): + raise EmitterError( + "Event name or a namespace part must contain only letters, numbers or underscores.", + ) + + +def assert_valid_namespace(path: list[str]) -> None: + for part in path: + assert_valid_name(part) diff --git a/python/beeai_framework/errors.py b/python/beeai_framework/errors.py new file mode 100644 index 00000000..f05c388a --- /dev/null +++ b/python/beeai_framework/errors.py @@ -0,0 +1,179 @@ +# SPDX-License-Identifier: Apache-2.0 + +from asyncio import CancelledError + + +class FrameworkError(Exception): + """ + Base class for Framework errors which extends Exception + All errors should extend from this base class. + """ + + def __init__( + self, + message: str = "Framework error", + *, + is_fatal: bool = True, + is_retryable: bool = False, + cause: Exception | None = None, + ) -> None: + super().__init__(message) + + # TODO: What other attributes should all our framework errors have? + self.message = message + self._is_fatal = is_fatal + self._is_retryable = is_retryable + self.__cause__ = cause + + @staticmethod + def __get_message(error: Exception) -> str: + """ + Get message from exception, but use classname if none (for dump/explain) + """ + message = str(error) if len(str(error)) > 0 else type(error).__name__ + return message + + def is_retryable(self) -> bool: + """is error retryable?.""" + return self._is_retryable + + def is_fatal(self) -> bool: + """is error fatal?""" + return self._is_fatal + + def name(self) -> str: + """get name (class) of this error""" + return type(self).__name__ + + def has_fatal_error(self) -> bool: + """ + Check if this error or any in the chain of exceptions under __cause__ is fatal (iterative). + """ + current_exception = self # Start with the current exception + + while current_exception is not None: + if isinstance(current_exception, FrameworkError) and current_exception.is_fatal(): + return True # Found a fatal FrameworkError + current_exception = current_exception.__cause__ # Move to the next exception in the chain + + return False # No fatal FrameworkError found in the chain + + # TODO: Better method name could be 'get_nested_exceptions' + def traverse_errors(self) -> list[Exception]: + """ + Traverses all nested exceptions (iterative implementation). + """ + exceptions: list[Exception] = [] + current_exception: Exception = self # Start with the current exception + + while current_exception is not None: + exceptions.append(current_exception) + current_exception = current_exception.__cause__ + + return exceptions + + def get_cause(self) -> Exception: + """ + finds the innermost exception - deemed to be cause + """ + deepest_cause = self + + while deepest_cause.__cause__ is not None: + deepest_cause = deepest_cause.__cause__ + + return deepest_cause + + # TODO: Copied across from typescript - need to check on desired output + + def explain(self) -> str: + """ + Return a string to explain the error, suitable for the LLM (iterative). 
+ """ + lines = [] + current_exception = self + indent_level = 0 + + while current_exception: + prefix = f"{indent_level * ' '}" + if indent_level > 0: + prefix += "Caused by: " + + message = f"{prefix}{self.__get_message(current_exception)}" + lines.append(message) + + current_exception = current_exception.__cause__ + indent_level += 1 + + return "\n".join(lines) + + # TODO: Desired output format needs reviewing (or just dump full exception as string with stacktraces) + def dump(self) -> str: + """ + Produce a string representation of the error suitable for debugging (iterative). + """ + lines = [] + current_exception = self + indent_level = 0 + + while current_exception: + prefix = f"{indent_level * ' '}" + if indent_level > 0: + prefix += "Caused By: " + + # TODO Needs generalization by checking attributes - helps when classes extended in future + if isinstance(current_exception, FrameworkError): + fatal = "Fatal" if current_exception.is_fatal() else "" + retryable = "Retryable" if current_exception.is_retryable() else "" + class_name = current_exception.name() + message = current_exception.message + line = f"{prefix}Class: {class_name}, Fatal: {fatal}, Retryable: {retryable}, Message: {message}" + else: + class_name = type(current_exception).__name__ + message = str(current_exception) + line = f"{prefix}Class: {class_name}, Message: {message}" + + lines.append(line) + + current_exception = current_exception.__cause__ + indent_level += 1 + + return "\n".join(lines) + + @staticmethod + def ensure(error: Exception) -> "FrameworkError": + """ + Ensure we have a FrameworkError - create and wrap error passed if required + """ + if isinstance(error, FrameworkError): + return error + return FrameworkError(message=str(error), cause=error) + + # TODO: Remove? Just use isinstance? + @staticmethod + def is_instance_of(obj: Exception) -> bool: + """Static method to check if the given object is an instance of FrameworkError.""" + return isinstance(obj, FrameworkError) + + +class UnimplementedError(FrameworkError): + """ + Raised when a method or function has not been implemented. + """ + + def __init__(self, message: str = "Not implemented!", *, cause: Exception | None = None) -> None: + super().__init__(message, is_fatal=True, is_retryable=False, cause=cause) + + +class ArgumentError(FrameworkError): + """Raised for invalid or unsupported values.""" + + def __init__(self, message: str = "Provided value is not supported!", *, cause: Exception | None = None) -> None: + # TODO is a value error fatal. It is with same value... 
+        super().__init__(message, is_fatal=True, is_retryable=False, cause=cause)
+
+
+class AbortError(FrameworkError, CancelledError):
+    """Raised when an operation has been aborted."""
+
+    def __init__(self, message: str = "Operation has been aborted!") -> None:
+        super().__init__(message, is_fatal=True, is_retryable=False)
diff --git a/python/beeai_framework/llms/__init__.py b/python/beeai_framework/llms/__init__.py
new file mode 100644
index 00000000..3ca3a0d5
--- /dev/null
+++ b/python/beeai_framework/llms/__init__.py
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from beeai_framework.llms.base_output import BaseChatLLMOutput, BaseLLMOutput
+from beeai_framework.llms.llm import LLM, AgentInput, BaseLLM
+from beeai_framework.llms.output import ChatLLMOutput, ChatOutput
+
+__all__ = [
+    "LLM",
+    "AgentInput",
+    "BaseChatLLMOutput",
+    "BaseLLM",
+    "BaseLLMOutput",
+    "ChatLLMOutput",
+    "ChatOutput",
+]
diff --git a/python/beeai_framework/llms/base_output.py b/python/beeai_framework/llms/base_output.py
new file mode 100644
index 00000000..2956c126
--- /dev/null
+++ b/python/beeai_framework/llms/base_output.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from abc import ABC, abstractmethod
+from collections.abc import Sequence
+from dataclasses import dataclass
+
+from beeai_framework.backend import Message
+
+
+@dataclass
+class BaseLLMOutput:
+    """Base class for LLM outputs."""
+
+    pass
+
+
+class BaseChatLLMOutput(BaseLLMOutput, ABC):
+    """Abstract base class for chat LLM outputs."""
+
+    @property
+    @abstractmethod
+    def messages(self) -> Sequence[Message]:
+        """Get the messages from the LLM output.
+        Returns:
+            Sequence[Message]: A read-only sequence of messages
+        """
+        raise NotImplementedError
diff --git a/python/beeai_framework/llms/llm.py b/python/beeai_framework/llms/llm.py
new file mode 100644
index 00000000..a32510f6
--- /dev/null
+++ b/python/beeai_framework/llms/llm.py
@@ -0,0 +1,138 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import math
+import re
+from abc import ABC, abstractmethod
+from collections.abc import AsyncGenerator
+from dataclasses import dataclass
+from typing import Any, Generic, TypeVar
+
+from litellm import completion
+
+from beeai_framework.backend import Message, Role
+from beeai_framework.llms.base_output import BaseChatLLMOutput
+from beeai_framework.llms.output import ChatLLMOutput, ChatOutput
+from beeai_framework.memory.base_memory import BaseMemory
+from beeai_framework.tools.tool import Tool
+from beeai_framework.utils.custom_logger import BeeLogger
+from beeai_framework.utils.templates import Prompt
+
+T = TypeVar("T", bound="BaseChatLLMOutput")
+logger = BeeLogger(__name__)
+
+
+class BaseLLM(Generic[T], ABC):
+    """Abstract base class for Language Model implementations."""
+
+    base_url: str | None
+    model: str | None
+
+    def __init__(self, base_url: str | None = None, model: str | None = None) -> None:
+        self.base_url = base_url
+
+        # Normalize bare model names onto the ollama_chat provider prefix.
+        if model is None:
+            self.model = None
+        elif "/" not in model:
+            self.model = f"ollama_chat/{model}"
+        elif model.startswith("ollama/"):
+            self.model = model.replace("ollama/", "ollama_chat/", 1)
+        else:
+            self.model = model
+
+    @abstractmethod
+    def inference(self, input: list[Message], options: Any) -> T:
+        pass
+
+    # TODO: define the type annotation for output (Message???) and remove noqa
+    @abstractmethod
+    def parse_output(self, output, tools: list[Tool]) -> str | None:  # noqa: ANN001
+        pass
+
+    def generate(self, prompt: Prompt | list[Message], options: Any = None) -> T:
+        if type(prompt) is dict:  # noqa: SIM108
+            input = [Message.of({"role": Role.USER, "text": prompt.get("prompt")})]
+        else:
+            input = prompt
+
+        answer = self.inference(input, options)
+        return answer
+
+    @abstractmethod
+    def tokenize(self, input: str) -> T:
+        pass
+
+
+class LLM(BaseLLM[BaseChatLLMOutput]):
+    parameters: dict[str, Any]
+    chat_endpoint = "/api/chat"
+
+    def __init__(
+        self,
+        model: str = "ollama_chat/llama3.1",
+        base_url: str | None = None,
+        api_key: str | None = None,
+        parameters: dict[str, Any] | None = None,
+    ) -> None:
+        if parameters is None:
+            parameters = {}
+        self.api_key = api_key
+
+        h = base_url[:-1] if base_url and base_url.endswith("/") else base_url
+        self.parameters = {
+            "temperature": 0,
+            "repeat_penalty": 1.0,
+            "num_predict": 2048,
+        } | parameters
+
+        super().__init__(h, model)
+
+    # TODO: add return type and remove noqa
+    def prepare_messages(self, input: list[Message]):  # noqa: ANN201
+        return [{"role": x.role, "content": x.text} for x in input]
+
+    def inference(self, input: list[Message], options: Any) -> T:
+        messages = self.prepare_messages(input)
+        logger.trace(f"LLM input:\n{messages}")
+        response = completion(
+            model=self.model, messages=messages, base_url=self.base_url, api_key=self.api_key, **(options or {})
+        )
+
+        logger.debug(f"Inference response choices size: {len(response.choices)}")
+        response_content = response.get("choices", [{}])[0].get("message", {}).get("content", "")
+        logger.trace(f"Inference response content:\n{response_content}")
+
+        return ChatLLMOutput(output=ChatOutput(response=response_content))
+
+    async def stream(self, input: list[Message], options: Any) -> AsyncGenerator[str, None]:
+        messages = self.prepare_messages(input)
+        response = completion(
+            model=self.model, messages=messages, base_url=self.base_url, api_key=self.api_key, stream=True, **(options or {})
+        )
+        for chunk in response:
+            yield chunk.choices[0].delta.content or ""
+
+    def tokenize(self, input: str) -> T:
+        return {"tokens_count": math.ceil(len(input) / 4)}
+
+    # TODO: define the type annotation for output (Message???) and remove noqa
+    def parse_output(self, output, tools: list[Tool]) -> str | None:  # noqa: ANN001
+        if len(tools):
+            regex = (
+                r"Thought: .+\n+(?:Final Answer: [\s\S]+|Function Name: ("
+                + "|".join([x.name for x in tools])
+                + ")\n+Function Input: \\{.*\\}(\n+Function Output:)?)"
+            )
+        else:
+            regex = r"Thought: .+\n+Final Answer: [\s\S]+"
+        r = re.search(regex, output.text)
+        if r is not None:
+            return r.group()
+
+
+@dataclass
+class AgentInput(Generic[T]):
+    """Input configuration for agent initialization."""
+
+    llm: BaseLLM[T]
+    memory: "BaseMemory"
diff --git a/python/beeai_framework/llms/output.py b/python/beeai_framework/llms/output.py
new file mode 100644
index 00000000..6976df74
--- /dev/null
+++ b/python/beeai_framework/llms/output.py
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from collections.abc import Sequence
+from dataclasses import dataclass
+
+from beeai_framework.backend import AssistantMessage
+from beeai_framework.llms.base_output import BaseChatLLMOutput
+
+
+@dataclass
+class ChatOutput:
+    """Represents a chat output from Ollama LLM."""
+
+    response: str
+
+    def to_messages(self) -> list[AssistantMessage]:
+        """Convert the response to a list of messages."""
+        return [AssistantMessage(self.response)]
+
+
+@dataclass
+class ChatLLMOutput(BaseChatLLMOutput):
+    """Concrete implementation of ChatLLMOutput for Ollama."""
+
+    output: ChatOutput
+
+    @property
+    def messages(self) -> Sequence[AssistantMessage]:
+        return self.output.to_messages()
diff --git a/python/beeai_framework/memory/__init__.py b/python/beeai_framework/memory/__init__.py
new file mode 100644
index 00000000..4c3a5afe
--- /dev/null
+++ b/python/beeai_framework/memory/__init__.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from beeai_framework.memory.base_cache import BaseCache
+from beeai_framework.memory.base_memory import BaseMemory
+from beeai_framework.memory.errors import ResourceError, ResourceFatalError, SerializerError
+from beeai_framework.memory.file_cache import FileCache
+from beeai_framework.memory.readonly_memory import ReadOnlyMemory
+from beeai_framework.memory.serializable import Serializable
+from beeai_framework.memory.serializer import Serializer
+from beeai_framework.memory.sliding_cache import SlidingCache
+from beeai_framework.memory.sliding_memory import SlidingMemory
+from beeai_framework.memory.summarize_memory import SummarizeMemory
+from beeai_framework.memory.task_map import SlidingTaskMap, Task
+from beeai_framework.memory.token_memory import TokenMemory
+from beeai_framework.memory.unconstrained_cache import UnconstrainedCache
+from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory
+
+__all__ = [
+    "BaseCache",
+    "BaseMemory",
+    "FileCache",
+    "ReadOnlyMemory",
+    "ResourceError",
+    "ResourceFatalError",
+    "Serializable",
+    "Serializer",
+    "SerializerError",
+    "SlidingCache",
+    "SlidingMemory",
+    "SlidingTaskMap",
+    "SummarizeMemory",
+    "Task",
+    "TokenMemory",
+    "UnconstrainedCache",
+    "UnconstrainedMemory",
+]
diff --git a/python/beeai_framework/memory/base_cache.py b/python/beeai_framework/memory/base_cache.py
new file mode 100644
index 00000000..c9548b6c
--- /dev/null
+++ b/python/beeai_framework/memory/base_cache.py
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from abc import ABC, abstractmethod
+from collections.abc import Iterator
+from typing import Any, Generic, TypeVar
+
+T = TypeVar("T")
+
+
+class BaseCache(ABC, Generic[T]):
+    """Abstract base class for all Cache implementations."""
+
+    def __init__(self) -> None:
+        """Initialize the cache with an empty elements dictionary."""
+        self._elements: dict[str, Any] = {}
+        self._enabled: bool = True
+
+    @property
+    def enabled(self) -> bool:
+        """
+        Property that indicates if the cache is enabled.
+
+        Returns:
+            bool: True if cache is enabled, False otherwise
+        """
+        return self._enabled
+
+    @enabled.setter
+    def enabled(self, value: bool) -> None:
+        """
+        Set the enabled status of the cache.
+
+        Args:
+            value (bool): The new enabled status
+        """
+        self._enabled = value
+
+    @property
+    def elements(self) -> dict[str, Any]:
+        """
+        Property that provides access to the internal elements dictionary.
+
+        Returns:
+            Dict[str, Any]: The cache elements
+        """
+        return self._elements
+
+    async def serialize(self) -> str:
+        """Serialize the cache state."""
+        snapshot = await self.create_snapshot()
+        from beeai_framework.memory.serializer import (
+            Serializer,  # Import here to avoid circular imports
+        )
+
+        return await Serializer.serialize(
+            {
+                "target": {
+                    "module": self.__class__.__module__,
+                    "name": self.__class__.__name__,
+                },
+                "snapshot": snapshot,
+            }
+        )
+
+    @abstractmethod
+    async def set(self, key: str, value: Any) -> None:
+        """Add an element to the cache."""
+        pass
+
+    @abstractmethod
+    async def get(self, key: str) -> Any:
+        """Get an element from the cache."""
+        pass
+
+    @abstractmethod
+    async def has(self, key: str) -> bool:
+        """Check whether an element exists in the cache."""
+        pass
+
+    @abstractmethod
+    async def delete(self, key: str) -> bool:
+        """Delete an element from the cache."""
+        pass
+
+    @abstractmethod
+    def clear(self) -> None:
+        """Clear all the cache content."""
+        pass
+
+    def size(self) -> int:
+        """Return the number of cached elements."""
+        return len(self.elements)
+
+    def is_empty(self) -> bool:
+        """Check if the cache is empty."""
+        return len(self.elements) == 0
+
+    def __iter__(self) -> Iterator:
+        return iter(self.elements)
+
+    @abstractmethod
+    def create_snapshot(self) -> Any:
+        """Create a serializable snapshot of current state."""
+        pass
+
+    @abstractmethod
+    def load_snapshot(self, state: Any) -> None:
+        """Restore state from a snapshot."""
+        pass
diff --git a/python/beeai_framework/memory/base_memory.py b/python/beeai_framework/memory/base_memory.py
new file mode 100644
index 00000000..238c014a
--- /dev/null
+++ b/python/beeai_framework/memory/base_memory.py
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from abc import ABC, abstractmethod
+from collections.abc import Iterable
+from typing import TYPE_CHECKING, Any
+
+from beeai_framework.backend import Message
+
+if TYPE_CHECKING:
+    from beeai_framework.memory.readonly_memory import ReadOnlyMemory
+
+
+class BaseMemory(ABC):
+    """Abstract base class for all memory implementations."""
+
+    @property
+    @abstractmethod
+    def messages(self) -> list[Message]:
+        """Return list of stored messages."""
+        pass
+
+    @abstractmethod
+    async def add(self, message: Message, index: int | None = None) -> None:
+        """Add a message to memory."""
+        pass
+
+    @abstractmethod
+    async def delete(self, message: Message) -> bool:
+        """Delete a message from memory."""
+        pass
+
+    @abstractmethod
+    def reset(self) -> None:
+        """Clear all messages from memory."""
+        pass
+
+    async def add_many(self, messages: Iterable[Message], start: int | None = None) -> None:
+        """Add multiple messages to memory."""
+        for counter, msg in enumerate(messages):
+            index = None if start is None else start + counter
+            await self.add(msg, index)
+
+    async def delete_many(self, messages: Iterable[Message]) -> None:
+        """Delete multiple messages from memory."""
+        for msg in messages:
+            await self.delete(msg)
+
+    async def splice(self, start: int, delete_count: 
int, *items: Message) -> list[Message]: + """Remove and insert messages at a specific position.""" + total = len(self.messages) + start = max(total + start, 0) if start < 0 else start + delete_count = min(delete_count, total - start) + + deleted_items = self.messages[start : start + delete_count] + await self.delete_many(deleted_items) + await self.add_many(items, start) + + return deleted_items + + def is_empty(self) -> bool: + """Check if memory is empty.""" + return len(self.messages) == 0 + + def __iter__(self) -> None: + return iter(self.messages) + + @abstractmethod + def create_snapshot(self) -> Any: + """Create a serializable snapshot of current state.""" + pass + + @abstractmethod + def load_snapshot(self, state: Any) -> None: + """Restore state from a snapshot.""" + pass + + def as_read_only(self) -> "ReadOnlyMemory": + """Return a read-only view of this memory.""" + from beeai_framework.memory.readonly_memory import ( # Import here to avoid circular import + ReadOnlyMemory, + ) + + return ReadOnlyMemory(self) diff --git a/python/beeai_framework/memory/errors.py b/python/beeai_framework/memory/errors.py new file mode 100644 index 00000000..4e824d84 --- /dev/null +++ b/python/beeai_framework/memory/errors.py @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: Apache-2.0 + + +from beeai_framework.errors import FrameworkError + + +class ResourceError(FrameworkError): + """Base class for memory-related exceptions.""" + + def __init__( + self, + message: str = "Memory error", + *, + is_fatal: bool = False, + is_retryable: bool = False, + cause: Exception | None = None, + ) -> None: + super().__init__(message, is_fatal=is_fatal, is_retryable=is_retryable, cause=cause) + + +class ResourceFatalError(ResourceError): + """Fatal memory errors that cannot be recovered from.""" + + def __init__(self, message: str = "Memory error - fatal", *, cause: Exception | None = None) -> None: + super().__init__(message, is_fatal=True, is_retryable=False, cause=cause) + + +class SerializerError(FrameworkError): + """Raised for errors caused by serializer.""" + + def __init__(self, message: str = "Serializer error", *, cause: Exception | None = None) -> None: + super().__init__(message, is_fatal=True, is_retryable=False, cause=cause) diff --git a/python/beeai_framework/memory/file_cache.py b/python/beeai_framework/memory/file_cache.py new file mode 100644 index 00000000..3ad10f58 --- /dev/null +++ b/python/beeai_framework/memory/file_cache.py @@ -0,0 +1,238 @@ +# SPDX-License-Identifier: Apache-2.0 + +import os +from collections.abc import Callable +from dataclasses import dataclass +from functools import wraps +from typing import Any, Generic, Self, TypeVar + +import aiofiles + +from beeai_framework.memory.base_cache import BaseCache +from beeai_framework.memory.serializer import Serializer +from beeai_framework.memory.sliding_cache import SlidingCache +from beeai_framework.utils import BeeLogger + +logger = BeeLogger(__name__) + +T = TypeVar("T") + + +def cache() -> Callable: + """Decorator to cache method results.""" + + def decorator(func: Callable) -> Callable: + cache_key = f"_cache_{func.__name__}" + + @wraps(func) + async def wrapper(self: Self, *args: int, **kwargs: int) -> Any: + if not hasattr(self, cache_key): + setattr(self, cache_key, await func(self, *args, **kwargs)) + return getattr(self, cache_key) + + wrapper.clear_cache = lambda self: (delattr(self, cache_key) if hasattr(self, cache_key) else None) + return wrapper + + return decorator + + +@dataclass +class Input: + """Input configuration 
for FileCache.""" + + full_path: str + + +class FileCache(BaseCache[T], Generic[T]): + """File-based cache implementation.""" + + def __init__(self, input_config: Input) -> None: + """Initialize the FileCache with the given input configuration.""" + super().__init__() + self._input = input_config + self._register() + + @classmethod + def _register(cls) -> None: + """Register the cache class.""" + Serializer.register( + cls, + { + "to_plain": lambda x: x.create_snapshot(), + "from_plain": lambda x: cls.from_snapshot(x), + }, + ) + + @property + def source(self) -> str: + """Get the source file path.""" + return self._input.full_path + + @classmethod + async def from_provider(cls, provider: BaseCache[T], input_config: Input) -> "FileCache[T]": + """Create a new FileCache instance from a provider.""" + async with aiofiles.open(input_config.full_path, "w") as f: + serialized = await provider.serialize() # Await the serialization + await f.write(serialized) + return cls(input_config) + + @cache() + async def _get_provider(self) -> BaseCache[T]: + """Get the cache provider instance.""" + try: + exists = os.path.isfile(self._input.full_path) + except Exception: + exists = False + + if exists: + async with aiofiles.open(self._input.full_path) as f: + serialized = await f.read() + + deserialized = await Serializer.deserialize(serialized) + target = deserialized["target"] + snapshot = deserialized["snapshot"] + + target = Serializer.get_factory(target).ref + instance = target.from_snapshot(snapshot) + + if not isinstance(instance, BaseCache): + raise TypeError("Provided file does not serialize any instance of BaseCache class.") + + return instance + else: + return SlidingCache(size=float("inf"), ttl=float("inf")) + + async def reload(self) -> None: + """Reload the cache from the file.""" + self._get_provider.clear_cache(self) + await self._get_provider() + + async def _save(self) -> None: + """Save the cache to the file.""" + provider = await self._get_provider() + async with aiofiles.open(self._input.full_path, "w") as f: + serialized = await provider.serialize() # Await the serialization + await f.write(serialized) + + async def size(self) -> int: + """Get the number of items in the cache.""" + provider = await self._get_provider() + return await provider.size() + + async def set(self, key: str, value: T) -> None: + """Set a value in the cache.""" + provider = await self._get_provider() + await provider.set(key, value) + try: + await provider.get(key) + finally: + await self._save() + + async def get(self, key: str) -> T: + """Get a value from the cache.""" + provider = await self._get_provider() + return await provider.get(key) + + async def has(self, key: str) -> bool: + """Check if a key exists in the cache.""" + provider = await self._get_provider() + return await provider.has(key) + + async def delete(self, key: str) -> bool: + """Delete a key from the cache.""" + provider = await self._get_provider() + result = await provider.delete(key) + await self._save() + return result + + async def clear(self) -> None: + """Clear all items from the cache.""" + provider = await self._get_provider() + await provider.clear() + await self._save() + + async def create_snapshot(self) -> dict[str, Any]: + """Create a serializable snapshot of the current state.""" + return { + "input": {"full_path": self._input.full_path}, + "provider": await self._get_provider(), + } + + def load_snapshot(self, snapshot: dict[str, Any]) -> None: + """Restore state from a snapshot.""" + for key, value in snapshot.items(): + 
setattr(self, key, value) + + @classmethod + def from_snapshot(cls, snapshot: dict[str, Any]) -> "FileCache[T]": + """Create an instance from a snapshot.""" + instance = cls(Input(full_path=snapshot["input"]["full_path"])) + instance.load_snapshot(snapshot) + return instance + + +if __name__ == "__main__": + import asyncio + import os + import tempfile + from pathlib import Path + + async def test_file_cache() -> None: + try: + # Create a temporary directory for our test cache files + with tempfile.TemporaryDirectory() as temp_dir: + cache_file = Path(temp_dir) / "test_cache.json" + + logger.info("1. Creating and Testing Basic Cache Operations:") + # Initialize the cache + cache = FileCache[str](Input(str(cache_file))) + + # Test basic operations + logger.info("Setting values in cache...") + await cache.set("key1", "value1") + await cache.set("key2", "value2") + + # Verify values + value1 = await cache.get("key1") + value2 = await cache.get("key2") + logger.info(f"Retrieved values: key1={value1}, key2={value2}") + + # Check existence + has_key = await cache.has("key1") + logger.info(f"Has key1: {has_key}") + + # Get cache size + size = await cache.size() + logger.info(f"Cache size: {size}") + + logger.info("2. Testing File Persistence:") + # Verify file was created + logger.info(f"Cache file exists: {cache_file.exists()}") + logger.info(f"Cache file size: {cache_file.stat().st_size} bytes") + + logger.info("3. Testing Delete Operation:") + # Delete a key + deleted = await cache.delete("key2") + logger.info(f"Deleted key2: {deleted}") + has_key2 = await cache.has("key2") + logger.info(f"Has key2 after delete: {has_key2}") + + logger.info("4. Testing Clear Operation:") + # Clear the cache + await cache.clear() + size = await cache.size() + logger.info(f"Cache size after clear: {size}") + + logger.info("5. 
Testing Provider Creation:") + # Test with non-existent file + new_file = Path(temp_dir) / "new_cache.json" + new_cache = FileCache[str](Input(str(new_file))) + await new_cache.set("test_key", "test_value") + logger.info(f"Created new cache file: {new_file.exists()}") + logger.info("End of file cache operations") + + except Exception as e: + logger.error(f"Error during test: {e!s}") + + # Run the test + asyncio.run(test_file_cache()) diff --git a/python/beeai_framework/memory/readonly_memory.py b/python/beeai_framework/memory/readonly_memory.py new file mode 100644 index 00000000..2b7ae3a8 --- /dev/null +++ b/python/beeai_framework/memory/readonly_memory.py @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.backend import Message +from beeai_framework.memory.base_memory import BaseMemory + + +class ReadOnlyMemory(BaseMemory): + """Read-only wrapper for a memory instance.""" + + def __init__(self, source: BaseMemory) -> None: + self.source = source + + @property + def messages(self) -> list[Message]: + return self.source.messages + + async def add(self, message: Message, index: int | None = None) -> None: + pass # No-op for read-only memory + + async def delete(self, message: Message) -> bool: + return False # No-op for read-only memory + + def reset(self) -> None: + pass # No-op for read-only memory + + def create_snapshot(self) -> dict: + return {"source": self.source} + + def load_snapshot(self, state: dict) -> None: + self.source = state["source"] + + def as_read_only(self) -> "ReadOnlyMemory": + """Return self since already read-only.""" + return self diff --git a/python/beeai_framework/memory/serializable.py b/python/beeai_framework/memory/serializable.py new file mode 100644 index 00000000..cfb2067f --- /dev/null +++ b/python/beeai_framework/memory/serializable.py @@ -0,0 +1,97 @@ +# SPDX-License-Identifier: Apache-2.0 + +from abc import ABC, abstractmethod +from copy import deepcopy +from typing import Any, ClassVar, TypeVar + +T = TypeVar("T") + + +class Serializable(ABC): + """Base class for all serializable objects.""" + + _registered_classes: ClassVar[dict[str, type["Serializable"]]] = {} + + def __init_subclass__(cls, **kwargs: int) -> None: + """Automatically register subclasses when they're created.""" + super().__init_subclass__(**kwargs) + cls._registered_classes[cls.__name__] = cls + + @classmethod + def register(cls, aliases: list[str] | None = None) -> None: + """Register the class and any aliases for serialization.""" + cls._registered_classes[cls.__name__] = cls + if aliases: + for alias in aliases: + if alias in cls._registered_classes and cls._registered_classes[alias] != cls: + raise ValueError(f"Alias '{alias}' already registered to a different class") + cls._registered_classes[alias] = cls + + @classmethod + def from_serialized(cls: type[T], data: dict[str, Any]) -> T: + """Create an instance from serialized data.""" + instance = cls.__new__(cls) + Serializable.__init__(instance) + instance.load_snapshot(data) + return instance + + @classmethod + def from_snapshot(cls: type[T], data: dict[str, Any]) -> T: + """Create an instance from a snapshot.""" + instance = cls.__new__(cls) + Serializable.__init__(instance) + instance.load_snapshot(data) + return instance + + def serialize(self) -> dict[str, Any]: + """Serialize the object to a dictionary.""" + return {"__class": self.__class__.__name__, "__value": self.create_snapshot()} + + @abstractmethod + def create_snapshot(self) -> dict[str, Any]: + """Create a serializable snapshot of the 
object's state.""" + raise NotImplementedError + + @abstractmethod + def load_snapshot(self, state: dict[str, Any]) -> None: + """Load object state from a snapshot.""" + raise NotImplementedError + + def clone(self: T) -> T: + """Create a deep copy of the object.""" + snapshot = self.create_snapshot() + return self.__class__.from_snapshot(deepcopy(snapshot)) + + +# Example of how to use the base class: +class ExampleSerializable(Serializable): + def __init__(self, data: str) -> None: + super().__init__() + self.data = data + + @classmethod + def register(cls, aliases: list[str] | None = None) -> None: + """Register with custom aliases.""" + super().register(aliases) + + def create_snapshot(self) -> dict[str, Any]: + return {"data": self.data} + + def load_snapshot(self, state: dict[str, Any]) -> None: + self.data = state["data"] + + +# Usage example: +if __name__ == "__main__": + # Register the class with an alias + ExampleSerializable.register(aliases=["Example", "ExampleClass"]) + + # Create and serialize an instance + obj = ExampleSerializable("test data") + serialized = obj.serialize() + + # Create new instance from serialized data + new_obj = ExampleSerializable.from_serialized(serialized["__value"]) + + # Create a clone + cloned = obj.clone() diff --git a/python/beeai_framework/memory/serializer.py b/python/beeai_framework/memory/serializer.py new file mode 100644 index 00000000..93e7c743 --- /dev/null +++ b/python/beeai_framework/memory/serializer.py @@ -0,0 +1,254 @@ +# SPDX-License-Identifier: Apache-2.0 + +import asyncio +import base64 +import json +from abc import ABC, abstractmethod +from collections.abc import Callable +from datetime import UTC, datetime +from typing import Any, TypeVar + +from beeai_framework.memory.errors import SerializerError +from beeai_framework.memory.task_map import SlidingTaskMap, Task + +T = TypeVar("T") + + +class SerializerFactory: + """Factory for serializable class registration and instantiation.""" + + def __init__(self, cls_ref: type[Any]) -> None: + self.ref = cls_ref + self.module = cls_ref.__module__ + self.name = cls_ref.__name__ + self.to_plain = None + self.from_plain = None + self.create_empty = None + self.update_instance = None + + +class Serializable(ABC): + """Base class for serializable objects.""" + + @classmethod + def register(cls) -> None: + """Register for serialization.""" + Serializer.register_serializable(cls) + + @classmethod + def from_snapshot(cls: type[T], snapshot: dict[str, Any]) -> T: + """Create instance from snapshot.""" + instance = cls() + instance.load_snapshot(snapshot) + return instance + + @abstractmethod + async def create_snapshot(self) -> dict[str, Any]: + """Create serializable snapshot.""" + pass + + @abstractmethod + def load_snapshot(self, snapshot: dict[str, Any]) -> None: + """Restore from snapshot.""" + pass + + +class Serializer: + """Main serializer class.""" + + _factories: dict[str, SerializerFactory] = {} # noqa: RUF012 + + @classmethod + def register_serializable(cls, target_cls: type[Any]) -> None: + """Register a serializable class.""" + # Register with both module name and __main__ + names = [ + f"{target_cls.__module__}.{target_cls.__name__}", + f"__main__.{target_cls.__name__}", + ] + factory = SerializerFactory(target_cls) + factory.to_plain = lambda x: x.create_snapshot() + factory.from_plain = target_cls.from_snapshot + + for name in names: + cls._factories[name] = factory + + @classmethod + def register(cls, target_cls: type[Any], processors: dict[str, Callable]) -> None: + """Register 
a class with custom processors."""
+        names = [
+            f"{target_cls.__module__}.{target_cls.__name__}",
+            f"__main__.{target_cls.__name__}",
+        ]
+        factory = SerializerFactory(target_cls)
+        factory.to_plain = processors.get("to_plain")
+        factory.from_plain = processors.get("from_plain")
+        factory.create_empty = processors.get("create_empty")
+        factory.update_instance = processors.get("update_instance")
+
+        for name in names:
+            cls._factories[name] = factory
+
+    @classmethod
+    def get_factory(cls, class_name: str) -> SerializerFactory:
+        """Get factory for class name."""
+        factory = cls._factories.get(class_name)
+        if not factory:
+            raise SerializerError(f"Class {class_name} not registered")
+        return factory
+
+    @classmethod
+    async def serialize(cls, data: Any) -> str:
+        """Serialize data to JSON string with async support."""
+
+        async def serialize_obj(obj: Any) -> Any:
+            if isinstance(obj, str | int | float | bool) or obj is None:
+                return obj
+
+            if isinstance(obj, list | tuple):
+                return [await serialize_obj(item) for item in obj]
+
+            if isinstance(obj, dict):
+                return {str(k): await serialize_obj(v) for k, v in obj.items()}
+
+            class_name = f"{obj.__class__.__module__}.{obj.__class__.__name__}"
+            try:
+                factory = cls.get_factory(class_name)
+                if factory.to_plain:
+                    snapshot = factory.to_plain(obj)
+                    # create_snapshot() is async on some classes, so unwrap a
+                    # coroutine before recursing.
+                    if asyncio.iscoroutine(snapshot):
+                        snapshot = await snapshot
+                    return {
+                        "__type": class_name,
+                        "__value": await serialize_obj(snapshot),
+                    }
+            except SerializerError:
+                pass
+
+            raise SerializerError(f"Cannot serialize object of type {class_name}")
+
+        serialized_data = await serialize_obj(data)
+        return json.dumps(serialized_data)
+
+    @classmethod
+    async def deserialize(cls, data: str) -> Any:
+        """Deserialize JSON string to object with async support."""
+
+        async def deserialize_obj(obj: Any) -> Any:
+            if isinstance(obj, str | int | float | bool) or obj is None:
+                return obj
+
+            if isinstance(obj, list):
+                return [await deserialize_obj(item) for item in obj]
+
+            if isinstance(obj, dict):
+                if "__type" in obj:
+                    factory = cls.get_factory(obj["__type"])
+                    if factory.from_plain:
+                        return factory.from_plain(await deserialize_obj(obj["__value"]))
+                return {k: await deserialize_obj(v) for k, v in obj.items()}
+
+            return obj
+
+        return await deserialize_obj(json.loads(data))
+
+
+# Register basic types
+for type_cls in (list, dict, set):
+    Serializer.register(
+        type_cls,
+        {
+            "to_plain": lambda x: list(x) if isinstance(x, list | set) else dict(x),
+            # Bind type_cls at definition time; a bare closure would leave all
+            # three entries deserializing into the loop's final type (set).
+            "from_plain": lambda x, type_cls=type_cls: type_cls(x),
+        },
+    )
+
+Serializer.register(
+    datetime,
+    {
+        "to_plain": lambda x: x.isoformat(),
+        "from_plain": lambda x: datetime.fromisoformat(x),
+    },
+)
+
+Serializer.register(
+    bytes,
+    {
+        "to_plain": lambda x: base64.b64encode(x).decode("utf-8"),
+        "from_plain": lambda x: base64.b64decode(x.encode("utf-8")),
+    },
+)
+
+Serializer.register(
+    SlidingTaskMap,
+    {
+        "to_plain": lambda value: {
+            "config": {"size": value.size, "ttl": value.ttl},
+            "entries": list(value.entries()),
+        },
+        "from_plain": lambda data: SlidingTaskMap.from_snapshot(data),
+        "create_empty": lambda: SlidingTaskMap(size=1, ttl=1000),
+        "update_instance": lambda instance, update: instance.load_snapshot(update),
+    },
+)
+
+# Register Task for serialization
+Serializer.register(
+    Task,
+    {
+        "to_plain": lambda task: {
+            "value": task.get_value() if task.is_resolved() else None,
+            "state": task.get_state(),
+        },
+        "from_plain": lambda data: Task.from_snapshot(data),
+        "create_empty": lambda: Task(),
+        "update_instance": lambda 
instance, update: instance.load_snapshot(update), + }, +) + +if __name__ == "__main__": + + class User(Serializable): + def __init__(self, name: str = "", age: int = 0, email: str | None = None) -> None: + self.name = name + self.age = age + self.email = email + + async def create_snapshot(self) -> dict[str, Any]: + return {"name": self.name, "age": self.age, "email": self.email} + + def load_snapshot(self, snapshot: dict[str, Any]) -> None: + self.name = snapshot["name"] + self.age = snapshot["age"] + self.email = snapshot.get("email") + + # Register the class + User.register() + + async def main() -> None: + try: + # Create and test serialization + user = User("Alice", 30, "alice@example.com") + + print("\n1. Basic User Serialization:") + serialized = await Serializer.serialize(user) + print(f"Serialized User: {serialized}") + + deserialized = await Serializer.deserialize(serialized) + print(f"Deserialized User: {deserialized.name}, {deserialized.age}") + + # Test with built-in types + print("\n2. Built-in Types Serialization:") + data = {"user": user, "numbers": [1, 2, 3], "timestamp": datetime.now(tz=UTC)} + + serialized = await Serializer.serialize(data) + print(f"Serialized Data: {serialized}") + + deserialized = await Serializer.deserialize(serialized) + print(f"Deserialized Data (user name): {deserialized['user'].name}") + + except SerializerError as e: + print(f"Serialization Error: {e}") + except Exception as e: + print(f"Unexpected Error: {e!s}") + + # Run the async main function + asyncio.run(main()) diff --git a/python/beeai_framework/memory/sliding_cache.py b/python/beeai_framework/memory/sliding_cache.py new file mode 100644 index 00000000..3e1c7f78 --- /dev/null +++ b/python/beeai_framework/memory/sliding_cache.py @@ -0,0 +1,111 @@ +# SPDX-License-Identifier: Apache-2.0 + +import time +from collections import OrderedDict +from typing import Any, Generic, TypeVar + +from beeai_framework.memory.base_cache import BaseCache +from beeai_framework.memory.serializer import Serializer + +T = TypeVar("T") + + +class SlidingCache(BaseCache[T], Generic[T]): + """Cache implementation using a sliding window strategy.""" + + def __init__(self, size: float = float("inf"), ttl: float | None = None) -> None: + """ + Initialize the sliding cache. 
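+
+        A usage sketch (illustrative; the awaits must run inside an event loop):
+
+            cache = SlidingCache[str](size=100, ttl=60.0)
+            await cache.set("query", "result")
+            value = await cache.get("query")  # None once the TTL has elapsed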
+ + Args: + size: Maximum number of items (default: infinite) + ttl: Time-to-live in seconds (default: None) + """ + super().__init__() + self._max_size = size + self._ttl = ttl + self._items: OrderedDict[str, tuple[T, float]] = OrderedDict() + # Register for serialization + self._register() + + @classmethod + def _register(cls) -> None: + """Register this class for serialization.""" + Serializer.register_serializable(cls) + + def _evict_expired(self) -> None: + """Remove expired entries.""" + if self._ttl is None: + return + + current_time = time.time() + expired_keys = [key for key, (_, timestamp) in self._items.items() if current_time - timestamp > self._ttl] + + for key in expired_keys: + self._items.pop(key, None) + + def _evict_overflow(self) -> None: + """Remove oldest entries if size limit is exceeded.""" + while len(self._items) > self._max_size: + self._items.popitem(last=False) + + async def set(self, key: str, value: T) -> None: + """Set a value in the cache.""" + self._evict_expired() + self._items[key] = (value, time.time()) + self._evict_overflow() + + async def get(self, key: str) -> T | None: + """Get a value from the cache.""" + self._evict_expired() + if key in self._items: + value, _ = self._items[key] + # Move to end (most recently used) + self._items.move_to_end(key) + return value + return None + + async def has(self, key: str) -> bool: + """Check if a key exists in the cache.""" + self._evict_expired() + return key in self._items + + async def delete(self, key: str) -> bool: + """Delete a key from the cache.""" + if key in self._items: + del self._items[key] + return True + return False + + async def clear(self) -> None: + """Clear all items from the cache.""" + self._items.clear() + + async def size(self) -> int: + """Get the current number of items in the cache.""" + self._evict_expired() + return len(self._items) + + async def create_snapshot(self) -> dict[str, Any]: + """Create a serializable snapshot of the current state.""" + self._evict_expired() + return { + "max_size": self._max_size, + "ttl": self._ttl, + "items": [(k, v[0], v[1]) for k, v in self._items.items()], + } + + def load_snapshot(self, snapshot: dict[str, Any]) -> None: + """Restore state from a snapshot.""" + self._max_size = snapshot["max_size"] + self._ttl = snapshot["ttl"] + self._items = OrderedDict() + for key, value, timestamp in snapshot["items"]: + self._items[key] = (value, timestamp) + + @classmethod + def from_snapshot(cls, snapshot: dict[str, Any]) -> "SlidingCache[T]": + """Create an instance from a snapshot.""" + instance = cls(size=snapshot.get("max_size", float("inf")), ttl=snapshot.get("ttl")) + instance.load_snapshot(snapshot) + return instance diff --git a/python/beeai_framework/memory/sliding_memory.py b/python/beeai_framework/memory/sliding_memory.py new file mode 100644 index 00000000..1a5fc910 --- /dev/null +++ b/python/beeai_framework/memory/sliding_memory.py @@ -0,0 +1,129 @@ +# SPDX-License-Identifier: Apache-2.0 + +from collections.abc import Callable +from copy import copy +from dataclasses import dataclass +from typing import Any, TypedDict + +from beeai_framework.backend import Message +from beeai_framework.memory import ResourceError +from beeai_framework.memory.base_memory import BaseMemory + + +class SlidingMemoryHandlers(TypedDict, total=False): + """Type definition for SlidingMemory handlers.""" + + removal_selector: Callable[[list[Message]], Message | list[Message]] + + +@dataclass +class SlidingMemoryConfig: + """Configuration for SlidingMemory.""" + + size: 
int + handlers: SlidingMemoryHandlers | None = None + + +class SlidingMemory(BaseMemory): + """Memory implementation using a sliding window approach.""" + + def __init__(self, config: SlidingMemoryConfig) -> None: + """Initialize SlidingMemory with given configuration. + + Args: + config: Configuration including window size and optional handlers + """ + self._messages: list[Message] = [] + self.config = config + + # Set default handlers if not provided + if self.config.handlers is None: + self.config.handlers = {} + + # Set default removal selector if not provided + if "removal_selector" not in self.config.handlers: + self.config.handlers["removal_selector"] = lambda messages: [messages[0]] + + @property + def messages(self) -> list[Message]: + """Get list of stored messages.""" + return self._messages + + def _is_overflow(self, additional_messages: int = 1) -> bool: + """Check if adding messages would cause overflow.""" + return len(self._messages) + additional_messages > self.config.size + + def _ensure_range(self, index: int, min_val: int, max_val: int) -> int: + """Ensure index is within the specified range.""" + return max(min_val, min(index, max_val)) + + async def add(self, message: Message, index: int | None = None) -> None: + """Add a message to memory, managing window size. + + Args: + message: Message to add + index: Optional position to insert message + + Raises: + ResourceFatalError: If removal selector fails to prevent overflow + """ + # Check for overflow + if self._is_overflow(): + # Get messages to remove using removal selector + to_remove = self.config.handlers["removal_selector"](self._messages) + if not isinstance(to_remove, list): + to_remove = [to_remove] + + # Remove selected messages + for msg in to_remove: + try: + msg_index = self._messages.index(msg) + self._messages.pop(msg_index) + except ValueError: + raise ResourceError( + "Cannot delete non existing message.", + context={"message": msg, "messages": self._messages}, + ) from ValueError + + # Check if we still have overflow + if self._is_overflow(): + raise ResourceError( + "Custom memory removalSelector did not return enough messages. Memory overflow has occurred." + ) + + # Add new message + if index is None: + index = len(self._messages) + index = self._ensure_range(index, 0, len(self._messages)) + self._messages.insert(index, message) + + async def delete(self, message: Message) -> bool: + """Delete a message from memory. 
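+
+        Example (sketch; `memory` and `msg` are illustrative names):
+
+            removed = await memory.delete(msg)  # False if msg was never stored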
+
+        Args:
+            message: Message to delete
+
+        Returns:
+            bool: True if message was found and deleted
+        """
+        try:
+            self._messages.remove(message)
+            return True
+        except ValueError:
+            return False
+
+    def reset(self) -> None:
+        """Clear all messages from memory."""
+        self._messages.clear()
+
+    def create_snapshot(self) -> dict[str, Any]:
+        """Create a serializable snapshot of current state."""
+        return {
+            "config": {"size": self.config.size, "handlers": self.config.handlers},
+            "messages": copy(self._messages),
+        }
+
+    def load_snapshot(self, state: dict[str, Any]) -> None:
+        """Restore state from a snapshot."""
+        self.config = SlidingMemoryConfig(size=state["config"]["size"], handlers=state["config"]["handlers"])
+        self._messages = copy(state["messages"])
diff --git a/python/beeai_framework/memory/summarize_memory.py b/python/beeai_framework/memory/summarize_memory.py
new file mode 100644
index 00000000..0743c395
--- /dev/null
+++ b/python/beeai_framework/memory/summarize_memory.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from collections.abc import Iterable
+from typing import TYPE_CHECKING
+
+from beeai_framework.backend import Message, SystemMessage
+from beeai_framework.backend.message import UserMessage
+from beeai_framework.memory.base_memory import BaseMemory
+
+if TYPE_CHECKING:
+    from beeai_framework.llms import BaseLLM
+
+
+class SummarizeMemory(BaseMemory):
+    """Memory implementation that summarizes conversations."""
+
+    def __init__(self, llm: "BaseLLM") -> None:
+        self._messages: list[Message] = []
+        self.llm = llm
+
+    @property
+    def messages(self) -> list[Message]:
+        return self._messages
+
+    async def add(self, message: Message, index: int | None = None) -> None:
+        """Add a message and replace the history with a fresh summary."""
+        messages_to_summarize = [*self._messages, message]
+        summary = await self._summarize_messages(messages_to_summarize)
+
+        self._messages = [SystemMessage(summary)]
+
+    async def add_many(self, messages: Iterable[Message], start: int | None = None) -> None:
+        """Add multiple messages and summarize."""
+        messages_to_summarize = self._messages + list(messages)
+        summary = await self._summarize_messages(messages_to_summarize)
+
+        self._messages = [SystemMessage(summary)]
+
+    async def _summarize_messages(self, messages: list[Message]) -> str:
+        """Summarize a list of messages using the LLM."""
+        if not messages:
+            return ""
+
+        prompt = UserMessage(
+            """Summarize the following conversation. Be concise but include all key information. 
+ +Previous messages: +{} + +Summary:""".format("\n".join([f"{msg.role}: {msg.text}" for msg in messages])) + ) + + # Generate is synchronous, not async + response = await self.llm.create({"messages": [prompt]}) + + return response.messages[0].get_texts()[0].get("text") + + async def delete(self, message: Message) -> bool: + """Delete a message from memory.""" + try: + self._messages.remove(message) + return True + except ValueError: + return False + + def reset(self) -> None: + """Clear all messages from memory.""" + self._messages.clear() + + def create_snapshot(self) -> dict: + """Create a serializable snapshot of current state.""" + return {"messages": self._messages.copy()} + + def load_snapshot(self, state: dict) -> None: + """Restore state from a snapshot.""" + self._messages = state["messages"].copy() diff --git a/python/beeai_framework/memory/task_map.py b/python/beeai_framework/memory/task_map.py new file mode 100644 index 00000000..79e401bb --- /dev/null +++ b/python/beeai_framework/memory/task_map.py @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: Apache-2.0 + +import time +from collections import OrderedDict +from typing import Any, Generic, TypeVar + +K = TypeVar("K") +V = TypeVar("V") + + +class Task: + def __init__(self) -> None: + self._value = None + self._state = "pending" + self._resolved = False + + def resolve(self, value: Any) -> None: + self._value = value + self._state = "resolved" + self._resolved = True + + def get_value(self) -> Any: + return self._value + + def get_state(self) -> str: + return self._state + + def is_resolved(self) -> bool: + return self._resolved + + @classmethod + def from_snapshot(cls, snapshot: dict[str, Any]) -> "Task": + """Create instance from snapshot data.""" + task = cls() + if snapshot["state"] == "resolved": + task.resolve(snapshot["value"]) + return task + + def load_snapshot(self, snapshot: dict[str, Any]) -> None: + """Load state from snapshot data.""" + if snapshot["state"] == "resolved": + self.resolve(snapshot["value"]) + + +class SlidingTaskMap(Generic[K, V]): + """ + A size-limited map that evicts oldest entries when full. + Optionally supports TTL-based eviction. + """ + + def __init__(self, size: float, ttl: float | None = None) -> None: + """ + Initialize the sliding map. 
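+
+        A usage sketch (illustrative values; not executed):
+
+            tasks = SlidingTaskMap(size=2, ttl=30.0)
+            tasks.set("a", Task())
+            tasks.set("b", Task())
+            tasks.set("c", Task())  # evicts "a", the oldest entry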
diff --git a/python/beeai_framework/memory/token_memory.py b/python/beeai_framework/memory/token_memory.py
new file mode 100644
index 00000000..e73442fc
--- /dev/null
+++ b/python/beeai_framework/memory/token_memory.py
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from copy import copy
+from math import ceil
+from typing import Any
+
+from beeai_framework.backend import Message
+from beeai_framework.memory.base_memory import BaseMemory
+
+
+def simple_estimate(msg: Message) -> int:
+    return ceil(len(msg.text) / 4)
+
+
+async def simple_tokenize(msgs: list[Message]) -> int:
+    return sum(map(simple_estimate, msgs))
+
+
+class TokenMemory(BaseMemory):
+    """Memory implementation that respects token limits."""
+
+    def __init__(
+        self,
+        llm: Any,
+        max_tokens: int | None = None,
+        sync_threshold: float = 0.25,
+        capacity_threshold: float = 0.75,
+        handlers: dict | None = None,
+    ) -> None:
+        self._messages: list[Message] = []
+
self.llm = llm + self.max_tokens = max_tokens + self.threshold = capacity_threshold + self.sync_threshold = sync_threshold + self._tokens_by_message = {} + + self.handlers = { + "estimate": (handlers.get("estimate", self._default_estimate) if handlers else self._default_estimate), + "removal_selector": ( + handlers.get("removal_selector", lambda msgs: msgs[0]) if handlers else lambda msgs: msgs[0] + ), + "tokenize": (handlers.get("tokenize", simple_tokenize) if handlers else simple_tokenize), + } + + if not 0 <= self.threshold <= 1: + raise ValueError('"capacity_threshold" must be a number in range (0, 1)') + + @staticmethod + def _default_estimate(msg: Message) -> int: + return int((len(msg.role) + len(msg.text)) / 4) + + def _get_message_key(self, message: Message) -> str: + """Generate a unique key for a message.""" + return f"{message.role}:{message.text}" + + @property + def messages(self) -> list[Message]: + return self._messages + + @property + def tokens_used(self) -> int: + return sum(info.get("tokens_count", 0) for info in self._tokens_by_message.values()) + + @property + def is_dirty(self) -> bool: + return any(info.get("dirty", True) for info in self._tokens_by_message.values()) + + async def sync(self) -> None: + """Synchronize token counts with LLM.""" + for msg in self._messages: + key = self._get_message_key(msg) + cache = self._tokens_by_message.get(key, {}) + if cache.get("dirty", True): + try: + result = await self.handlers["tokenize"]([msg]) + self._tokens_by_message[key] = { + "tokens_count": result, + "dirty": False, + } + except Exception as e: + print(f"Error tokenizing message: {e!s}") + self._tokens_by_message[key] = { + "tokens_count": self.handlers["estimate"](msg), + "dirty": True, + } + + async def add(self, message: Message, index: int | None = None) -> None: + index = len(self._messages) if index is None else max(0, min(index, len(self._messages))) + self._messages.insert(index, message) + + key = self._get_message_key(message) + estimated_tokens = self.handlers["estimate"](message) + self._tokens_by_message[key] = { + "tokens_count": estimated_tokens, + "dirty": True, + } + + dirty_count = sum(1 for info in self._tokens_by_message.values() if info.get("dirty", True)) + if len(self._messages) > 0 and dirty_count / len(self._messages) >= self.sync_threshold: + await self.sync() + + async def delete(self, message: Message) -> bool: + try: + key = self._get_message_key(message) + self._messages.remove(message) + self._tokens_by_message.pop(key, None) + return True + except ValueError: + return False + + def reset(self) -> None: + self._messages.clear() + self._tokens_by_message.clear() + + def create_snapshot(self) -> dict[str, Any]: + return { + "messages": copy(self._messages), + "token_counts": copy(self._tokens_by_message), + } + + def load_snapshot(self, state: dict[str, Any]) -> None: + self._messages = copy(state["messages"]) + self._tokens_by_message = copy(state["token_counts"]) diff --git a/python/beeai_framework/memory/unconstrained_cache.py b/python/beeai_framework/memory/unconstrained_cache.py new file mode 100644 index 00000000..f6c73dd7 --- /dev/null +++ b/python/beeai_framework/memory/unconstrained_cache.py @@ -0,0 +1,161 @@ +# SPDX-License-Identifier: Apache-2.0 + +from typing import Any, Generic, TypeVar + +from beeai_framework.memory.base_cache import BaseCache +from beeai_framework.memory.serializer import Serializer + +T = TypeVar("T") + + +class UnconstrainedCache(BaseCache[T], Generic[T]): + """Cache implementation without size or time 
constraints.""" + + def __init__(self) -> None: + """Initialize the unconstrained cache.""" + super().__init__() + self._provider: dict[str, T] = {} + self._register() + + @classmethod + def _register(cls) -> None: + """Register for serialization.""" + Serializer.register( + cls, + { + "to_plain": lambda x: { + "enabled": x.enabled, + "provider": dict(x._provider), + }, + "from_plain": lambda x: cls.from_snapshot(x), + }, + ) + + async def get(self, key: str) -> T: + """Get a value from the cache.""" + return self._provider.get(key) + + async def has(self, key: str) -> bool: + """Check if a key exists in the cache.""" + return key in self._provider + + async def clear(self) -> None: + """Clear all items from the cache.""" + self._provider.clear() + + async def delete(self, key: str) -> bool: + """Delete a key from the cache.""" + if key in self._provider: + del self._provider[key] + return True + return False + + async def set(self, key: str, value: T) -> None: + """Set a value in the cache.""" + self._provider[key] = value + + async def size(self) -> int: + """Get the current number of items in the cache.""" + return len(self._provider) + + async def create_snapshot(self) -> dict[str, Any]: + """Create a serializable snapshot of the current state.""" + return {"enabled": self.enabled, "provider": dict(self._provider)} + + def load_snapshot(self, snapshot: dict[str, Any]) -> None: + """Restore state from a snapshot.""" + self._enabled = snapshot["enabled"] + self._provider = dict(snapshot["provider"]) + + @classmethod + def from_snapshot(cls, snapshot: dict[str, Any]) -> "UnconstrainedCache[T]": + """Create an instance from a snapshot.""" + instance = cls() + instance.load_snapshot(snapshot) + return instance + + +if __name__ == "__main__": + import asyncio + + async def test_unconstrained_cache() -> None: + try: + print("\n1. Testing Basic Operations:") + # Create cache instance + cache = UnconstrainedCache[str]() + + # Test setting and getting values + print("Setting test values...") + await cache.set("key1", "value1") + await cache.set("key2", "value2") + await cache.set("key3", "value3") + + # Test retrieval + value1 = await cache.get("key1") + value2 = await cache.get("key2") + print(f"Retrieved values: key1={value1}, key2={value2}") + + # Test has method + exists = await cache.has("key1") + not_exists = await cache.has("nonexistent") + print(f"Has key1: {exists}") + print(f"Has nonexistent: {not_exists}") + + # Test size + size = await cache.size() + print(f"Cache size: {size}") + + print("\n2. Testing Delete Operation:") + # Test deletion + deleted = await cache.delete("key2") + size_after_delete = await cache.size() + print(f"Deleted key2: {deleted}") + print(f"Size after delete: {size_after_delete}") + + print("\n3. Testing Clear Operation:") + # Test clear + await cache.clear() + size_after_clear = await cache.size() + print(f"Size after clear: {size_after_clear}") + + print("\n4. Testing Serialization:") + # Test serialization + new_cache = UnconstrainedCache[str]() + await new_cache.set("test1", "data1") + await new_cache.set("test2", "data2") + + # Create snapshot + snapshot = await new_cache.create_snapshot() + print(f"Created snapshot: {snapshot}") + + # Create new instance from snapshot + restored_cache = UnconstrainedCache.from_snapshot(snapshot) + restored_value = await restored_cache.get("test1") + print(f"Restored value from snapshot: {restored_value}") + + print("\n5. 
Testing Enabled Property:") + # Test enabled property + original_state = new_cache.enabled + new_cache.enabled = False + print(f"Original enabled state: {original_state}") + print(f"New enabled state: {new_cache.enabled}") + + print("\n6. Testing Large Dataset:") + # Test with larger dataset + large_cache = UnconstrainedCache[int]() + print("Adding 1000 items...") + for i in range(1000): + await large_cache.set(f"key{i}", i) + + large_size = await large_cache.size() + sample_value = await large_cache.get("key500") + print(f"Large cache size: {large_size}") + print(f"Sample value (key500): {sample_value}") + + print("\nAll tests completed successfully!") + + except Exception as e: + print(f"Error during test: {e!s}") + + # Run the tests + asyncio.run(test_unconstrained_cache()) diff --git a/python/beeai_framework/memory/unconstrained_memory.py b/python/beeai_framework/memory/unconstrained_memory.py new file mode 100644 index 00000000..d8332c8a --- /dev/null +++ b/python/beeai_framework/memory/unconstrained_memory.py @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: Apache-2.0 + +from copy import copy + +from beeai_framework.backend import Message +from beeai_framework.memory.base_memory import BaseMemory + + +class UnconstrainedMemory(BaseMemory): + """Simple memory implementation with no constraints.""" + + def __init__(self) -> None: + self._messages: list[Message] = [] + + @property + def messages(self) -> list[Message]: + return self._messages + + async def add(self, message: Message, index: int | None = None) -> None: + index = len(self._messages) if index is None else max(0, min(index, len(self._messages))) + self._messages.insert(index, message) + + async def delete(self, message: Message) -> bool: + try: + self._messages.remove(message) + return True + except ValueError: + return False + + def reset(self) -> None: + self._messages.clear() + + def create_snapshot(self) -> dict: + return {"messages": copy(self._messages)} + + def load_snapshot(self, state: dict) -> None: + self._messages = copy(state["messages"]) diff --git a/python/beeai_framework/parsers/line_prefix.py b/python/beeai_framework/parsers/line_prefix.py new file mode 100644 index 00000000..1b76ae0b --- /dev/null +++ b/python/beeai_framework/parsers/line_prefix.py @@ -0,0 +1,41 @@ +from collections.abc import Generator + +from pydantic import BaseModel + + +class Prefix(BaseModel): + name: str + line_prefix: str + terminal: bool = False + + +class ParsedLine(BaseModel): + prefix: Prefix + content: str + + +class LinePrefixParser: + def __init__(self, prefixes: list[Prefix]) -> None: + self.prefixes: list[Prefix] = prefixes + self.buffer: str = "" + + def feed(self, chunk: str) -> Generator[ParsedLine | None, None, None]: + # Feeds a chunk of text into the parser and processes complete lines + self.buffer += chunk + lines = self.buffer.split("\n") + self.buffer = lines.pop() # Keep last partial line in buffer + + for line in lines: + yield self.process_line(line) + + def process_line(self, line: str) -> ParsedLine | None: + # Processes a single line, extracting the prefix if present + for prefix in self.prefixes: + if line.startswith(prefix.line_prefix): + return ParsedLine(prefix=prefix, content=line[len(prefix.line_prefix) :]) + return None # no match + + def finalize(self) -> Generator[ParsedLine | None, None, None]: + # process any remaining partial line in the buffer + if self.buffer: + yield self.process_line(self.buffer) diff --git a/python/beeai_framework/tools/__init__.py b/python/beeai_framework/tools/__init__.py new 
file mode 100644 index 00000000..bd5ca20e --- /dev/null +++ b/python/beeai_framework/tools/__init__.py @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.tools.errors import ToolError, ToolInputValidationError +from beeai_framework.tools.tool import ( + StringToolOutput, + Tool, + ToolOutput, + tool, +) + +__all__ = [ + "StringToolOutput", + "Tool", + "ToolError", + "ToolInputValidationError", + "ToolOutput", + "tool", +] diff --git a/python/beeai_framework/tools/errors.py b/python/beeai_framework/tools/errors.py new file mode 100644 index 00000000..591fbdcf --- /dev/null +++ b/python/beeai_framework/tools/errors.py @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.errors import FrameworkError + + +class ToolError(FrameworkError): + def __init__(self, message: str = "Tool Error", *, cause: Exception | None = None) -> None: + super().__init__(message, is_fatal=True, is_retryable=False, cause=cause) + + +class ToolInputValidationError(FrameworkError): + def __init__(self, message: str = "Tool Input Validation Error", *, cause: Exception | None = None) -> None: + super().__init__(message, is_fatal=True, is_retryable=False, cause=cause) diff --git a/python/beeai_framework/tools/mcp_tools.py b/python/beeai_framework/tools/mcp_tools.py new file mode 100644 index 00000000..bec89374 --- /dev/null +++ b/python/beeai_framework/tools/mcp_tools.py @@ -0,0 +1,80 @@ +# SPDX-License-Identifier: Apache-2.0 + +import json +from dataclasses import dataclass +from typing import Any, TypeVar + +from mcp.client.session import ClientSession +from mcp.types import CallToolResult +from mcp.types import Tool as MCPToolInfo + +from beeai_framework.emitter import Emitter, EmitterInput +from beeai_framework.tools import Tool +from beeai_framework.tools.tool import ToolOutput +from beeai_framework.utils import BeeLogger + +logger = BeeLogger(__name__) + +T = TypeVar("T") + + +@dataclass +class MCPToolInput: + """Input configuration for MCP Tool initialization.""" + + client: ClientSession + tool: MCPToolInfo + + +class MCPToolOutput(ToolOutput): + """Output class for MCP Tool results.""" + + def __init__(self, result: CallToolResult) -> None: + self.result = result + + def get_text_content(self) -> str: + return json.dumps(self.result, default=lambda o: o.__dict__, sort_keys=True, indent=4) + + def is_empty(self) -> bool: + return not self.result + + +class MCPTool(Tool[MCPToolOutput]): + """Tool implementation for Model Context Protocol.""" + + def __init__(self, client: ClientSession, tool: MCPToolInfo, **options: int) -> None: + """Initialize MCPTool with client and tool configuration.""" + super().__init__(options) + self.client = client + self._tool = tool + self._name = tool.name + self._description = tool.description or "No available description, use the tool based on its name and schema." 
+        self.emitter = Emitter.root().child(
+            EmitterInput(
+                namespace=["tool", "mcp", self._name],
+                creator=self,
+            )
+        )
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def description(self) -> str:
+        return self._description
+
+    def input_schema(self) -> dict[str, Any]:
+        return self._tool.inputSchema
+
+    async def _run(self, input_data: Any, options: dict | None = None) -> MCPToolOutput:
+        """Execute the tool with given input."""
+        logger.debug(f"Executing tool {self.name} with input: {input_data}")
+        result = await self.client.call_tool(name=self.name, arguments=input_data)
+        logger.debug(f"Tool result: {result}")
+        return MCPToolOutput(result)
+
+    @classmethod
+    async def from_client(cls, client: ClientSession) -> list["MCPTool"]:
+        tools_result = await client.list_tools()
+        return [cls(client=client, tool=tool) for tool in tools_result.tools]
diff --git a/python/beeai_framework/tools/search/__init__.py b/python/beeai_framework/tools/search/__init__.py
new file mode 100644
index 00000000..644e2098
--- /dev/null
+++ b/python/beeai_framework/tools/search/__init__.py
@@ -0,0 +1,6 @@
+# isort: skip_file
+# manually defined import order is important here to avoid circular imports
+from beeai_framework.tools.search.base import SearchToolResult, SearchToolOutput
+from beeai_framework.tools.search.duckduckgo import DuckDuckGoSearchTool
+
+__all__ = ["DuckDuckGoSearchTool", "SearchToolOutput", "SearchToolResult"]
diff --git a/python/beeai_framework/tools/search/base.py b/python/beeai_framework/tools/search/base.py
new file mode 100644
index 00000000..68ddfb59
--- /dev/null
+++ b/python/beeai_framework/tools/search/base.py
@@ -0,0 +1,28 @@
+import json
+
+from pydantic import BaseModel
+
+from beeai_framework.tools.tool import ToolOutput
+
+
+class SearchToolResult(BaseModel):
+    title: str
+    description: str
+    url: str
+
+
+class SearchToolOutput(ToolOutput):
+    def __init__(self, results: list[SearchToolResult]) -> None:
+        super().__init__()
+        self.results = results
+
+    def get_text_content(self) -> str:
+        return "\n\n".join(
+            [json.dumps(result, default=lambda o: o.__dict__, sort_keys=True, indent=4) for result in self.results]
+        )
+
+    def is_empty(self) -> bool:
+        return len(self.results) == 0
+
+    def sources(self) -> list[str]:
+        return [result.url for result in self.results]
diff --git a/python/beeai_framework/tools/search/duckduckgo.py b/python/beeai_framework/tools/search/duckduckgo.py
new file mode 100644
index 00000000..93171230
--- /dev/null
+++ b/python/beeai_framework/tools/search/duckduckgo.py
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from typing import Any
+
+from duckduckgo_search import DDGS
+from pydantic import BaseModel, Field
+
+from beeai_framework.tools import ToolError
+from beeai_framework.tools.search import SearchToolOutput, SearchToolResult
+from beeai_framework.tools.tool import Tool
+from beeai_framework.utils import BeeLogger
+
+logger = BeeLogger(__name__)
+
+
+class DuckDuckGoSearchType:
+    STRICT = "STRICT"
+    MODERATE = "MODERATE"
+    OFF = "OFF"
+
+
+class DuckDuckGoSearchToolInput(BaseModel):
+    query: str = Field(description="The search query.")
+
+
+class DuckDuckGoSearchToolResult(SearchToolResult):
+    pass
+
+
+class DuckDuckGoSearchToolOutput(SearchToolOutput):
+    pass
+
+
+class DuckDuckGoSearchTool(Tool[DuckDuckGoSearchToolInput]):
+    name = "DuckDuckGo"
+    description = "Search for online trends, news, current events, real-time information, or research topics."
+ input_schema = DuckDuckGoSearchToolInput + + def __init__(self, max_results: int = 10, safe_search: str = DuckDuckGoSearchType.STRICT) -> None: + super().__init__() + self.max_results = max_results + self.safe_search = safe_search + + def _run(self, input: DuckDuckGoSearchToolInput, _: Any | None = None) -> DuckDuckGoSearchToolOutput: + try: + results = DDGS().text(input.query, max_results=self.max_results, safesearch=self.safe_search) + search_results: list[SearchToolResult] = [ + DuckDuckGoSearchToolResult( + title=result.get("title") or "", description=result.get("body") or "", url=result.get("href") or "" + ) + for result in results + ] + return DuckDuckGoSearchToolOutput(search_results) + + except Exception as e: + raise ToolError("Error performing search:") from e diff --git a/python/beeai_framework/tools/search/wikipedia.py b/python/beeai_framework/tools/search/wikipedia.py new file mode 100644 index 00000000..c77f86ec --- /dev/null +++ b/python/beeai_framework/tools/search/wikipedia.py @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.tools.tool import Tool + + +class WikipediaTool(Tool): + name = "Wikipedia" + description = "Search factual and historical information, including biography, history, politics, geography, society, culture, science, technology, people, animal species, mathematics, and other subjects." # noqa: E501 + + def input_schema(self) -> str: + # TODO: remove hard code + return '{"type":"object","properties":{"query":{"type":"string","format":"date","description":"Name of the wikipedia page, for example \'New York\'"}}}' # noqa: E501 + + def _run(self) -> None: + pass diff --git a/python/beeai_framework/tools/tool.py b/python/beeai_framework/tools/tool.py new file mode 100644 index 00000000..e098d8c6 --- /dev/null +++ b/python/beeai_framework/tools/tool.py @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: Apache-2.0 + +import inspect +from abc import ABC, abstractmethod +from collections.abc import Callable +from typing import Any, Generic, TypeVar + +from pydantic import BaseModel, ConfigDict, ValidationError, create_model + +from beeai_framework.tools.errors import ToolInputValidationError +from beeai_framework.utils import BeeLogger + +logger = BeeLogger(__name__) + +T = TypeVar("T", bound=BaseModel) + + +class ToolOutput(ABC): + @abstractmethod + def get_text_content(self) -> str: + pass + + @abstractmethod + def is_empty(self) -> bool: + pass + + def to_string(self) -> str: + return self.get_text_content() + + +class StringToolOutput(ToolOutput): + def __init__(self, result: str = "") -> None: + super().__init__() + self.result = result + + def is_empty(self) -> bool: + return len(self.result) == 0 + + def get_text_content(self) -> str: + return self.result + + +class Tool(Generic[T], ABC): + options: dict[str, Any] + + def __init__(self, options: dict[str, Any] | None = None) -> None: + if options is None: + options = {} + self.options = options + + @property + @abstractmethod + def name(self) -> str: + pass + + @property + @abstractmethod + def description(self) -> str: + pass + + @property + @abstractmethod + def input_schema(self) -> type[T]: + pass + + @abstractmethod + def _run(self, input: Any, options: dict[str, Any] | None = None) -> Any: + pass + + def validate_input(self, input: T | dict[str, Any]) -> T: + try: + return self.input_schema.model_validate(input) + except ValidationError as e: + logger.error(f"Validation error: {e!s}") + raise ToolInputValidationError("Tool input validation error") from e + + def 
prompt_data(self) -> dict[str, str]: + return { + "name": self.name, + "description": self.description, + "input_schema": str(self.input_schema.model_json_schema(mode="serialization")), + } + + def run(self, input: T | dict[str, Any], options: dict[str, Any] | None = None) -> Any: + return self._run(self.validate_input(input), options) + + +# this method was inspired by the discussion that was had in this issue: +# https://github.com/pydantic/pydantic/issues/1391 +def get_input_schema(tool_function: Callable) -> BaseModel: + input_model_name = tool_function.__name__ + + args, _, _, defaults, kwonlyargs, kwonlydefaults, annotations = inspect.getfullargspec(tool_function) + defaults = defaults or [] + args = args or [] + + non_default_args = len(args) - len(defaults) + try: + defaults = (...,) * non_default_args + defaults + except TypeError: + defaults = [ + ..., + ] * non_default_args + defaults + + keyword_only_params = {param: kwonlydefaults.get(param, Any) for param in kwonlyargs} + params = {param: (annotations.get(param, Any), default) for param, default in zip(args, defaults, strict=False)} + + input_model = create_model( + input_model_name, + **params, + **keyword_only_params, + __config__=ConfigDict(extra="allow", arbitrary_types_allowed=True), + ) + + return input_model + + +def tool(tool_function: Callable) -> Tool: + tool_name = tool_function.__name__ + tool_description = inspect.getdoc(tool_function) + tool_input = get_input_schema(tool_function) + + class FunctionTool(Tool): + name = tool_name + description = tool_description + input_schema = tool_input + + def _run(self, tool_in: Any, _: dict[str, Any] | None = None) -> None: + tool_input_dict = tool_in.model_dump() + return tool_function(**tool_input_dict) + + f_tool = FunctionTool() + return f_tool diff --git a/python/beeai_framework/tools/weather/openmeteo.py b/python/beeai_framework/tools/weather/openmeteo.py new file mode 100644 index 00000000..baa7c020 --- /dev/null +++ b/python/beeai_framework/tools/weather/openmeteo.py @@ -0,0 +1,124 @@ +# SPDX-License-Identifier: Apache-2.0 + +import json +from collections import namedtuple +from datetime import UTC, datetime +from typing import Any, Literal +from urllib.parse import urlencode + +import requests +from pydantic import BaseModel, Field + +from beeai_framework.tools import ToolInputValidationError +from beeai_framework.tools.tool import StringToolOutput, Tool +from beeai_framework.utils import BeeLogger + +logger = BeeLogger(__name__) + + +class OpenMeteoToolInput(BaseModel): + location_name: str = Field(description="The name of the location to retrieve weather information.") + country: str | None = Field(description="Country name.", default=None) + start_date: str | None = Field( + description="Start date for the weather forecast in the format YYYY-MM-DD (UTC)", default=None + ) + end_date: str | None = Field( + description="End date for the weather forecast in the format YYYY-MM-DD (UTC)", default=None + ) + temperature_unit: Literal["celsius", "fahrenheit"] = Field( + description="The unit to express temperature", default="celsius" + ) + + +class OpenMeteoTool(Tool[OpenMeteoToolInput]): + name = "OpenMeteoTool" + description = "Retrieve current, past, or future weather forecasts for a location." 
+ input_schema = OpenMeteoToolInput + + def _geocode(self, input: OpenMeteoToolInput) -> dict[str, Any]: + params = {"format": "json", "count": 1} + if input.location_name: + params["name"] = input.location_name + if input.country: + params["country"] = input.country + + params = urlencode(params, doseq=True) + + response = requests.get( + f"https://geocoding-api.open-meteo.com/v1/search?{params}", + headers={"Content-Type": "application/json", "Accept": "application/json"}, + ) + + response.raise_for_status() + results = response.json()["results"] + return results[0] + + def get_params(self, input: OpenMeteoToolInput) -> dict[str, Any]: + params = { + "current": ",".join( + [ + "temperature_2m", + "rain", + "relative_humidity_2m", + "wind_speed_10m", + ] + ), + "daily": ",".join(["temperature_2m_max", "temperature_2m_min", "rain_sum"]), + "timezone": "UTC", + } + + geocode = self._geocode(input) + params["latitude"] = geocode.get("latitude") + params["longitude"] = geocode.get("longitude") + + Dates = namedtuple("Dates", ["start_date", "end_date"]) + + def _validate_and_set_dates(start_date: str | None, end_date: str | None) -> Dates: + # Trim date str assuming YYYY-MM-DD + def _trim_date(date_str: str) -> str: + return date_str[0:10] + + start, end = None, None + + if start_date: + try: + start = datetime.strptime(_trim_date(start_date), "%Y-%m-%d").replace(tzinfo=UTC) + except ValueError as e: + raise ToolInputValidationError( + "'start_date' is incorrectly formatted, please use the correct format YYYY-MM-DD." + ) from e + else: + start = datetime.now(UTC) + + if end_date: + try: + end = datetime.strptime(_trim_date(end_date), "%Y-%m-%d").replace(tzinfo=UTC) + except ValueError as e: + raise ToolInputValidationError( + "'end_date' is incorrectly formatted, please use the correct format YYYY-MM-DD." 
+                    ) from e
+
+                if end < start:
+                    raise ToolInputValidationError("'end_date' must fall on or after 'start_date'.") from None
+
+            else:
+                end = datetime.now(UTC)
+
+            return Dates(start_date=start.strftime("%Y-%m-%d"), end_date=end.strftime("%Y-%m-%d"))
+
+        dates = _validate_and_set_dates(start_date=input.start_date, end_date=input.end_date)
+
+        params["start_date"] = dates.start_date
+        params["end_date"] = dates.end_date
+        params["temperature_unit"] = input.temperature_unit
+        return params
+
+    def _run(self, input: OpenMeteoToolInput, options: Any = None) -> StringToolOutput:
+        params = urlencode(self.get_params(input), doseq=True)
+        logger.debug(f"Using OpenMeteo URL: https://api.open-meteo.com/v1/forecast?{params}")
+        response = requests.get(
+            f"https://api.open-meteo.com/v1/forecast?{params}",
+            headers={"Content-Type": "application/json", "Accept": "application/json"},
+        )
+        response.raise_for_status()
+        return StringToolOutput(json.dumps(response.json()))
diff --git a/python/beeai_framework/utils/__init__.py b/python/beeai_framework/utils/__init__.py
new file mode 100644
index 00000000..ec59dbaa
--- /dev/null
+++ b/python/beeai_framework/utils/__init__.py
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from beeai_framework.utils.config import CONFIG
+from beeai_framework.utils.custom_logger import BeeLogger
+from beeai_framework.utils.errors import LoggerError, PromptTemplateError
+from beeai_framework.utils.events import MessageEvent
+
+__all__ = ["CONFIG", "BeeLogger", "LoggerError", "MessageEvent", "PromptTemplateError"]
diff --git a/python/beeai_framework/utils/_types.py b/python/beeai_framework/utils/_types.py
new file mode 100644
index 00000000..ab6df62c
--- /dev/null
+++ b/python/beeai_framework/utils/_types.py
@@ -0,0 +1,7 @@
+from collections.abc import Awaitable, Callable
+from typing import ParamSpec, TypeAlias, TypeVar
+
+P = ParamSpec("P")
+R = TypeVar("R")
+
+MaybeAsync: TypeAlias = Callable[P, R] | Callable[P, Awaitable[R]]
diff --git a/python/beeai_framework/utils/config.py b/python/beeai_framework/utils/config.py
new file mode 100644
index 00000000..3ccf1be5
--- /dev/null
+++ b/python/beeai_framework/utils/config.py
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+class Settings(BaseSettings):
+    model_config = SettingsConfigDict(
+        env_file=".env",
+        env_file_encoding="utf-8",
+        env_nested_delimiter="__",
+        env_prefix="beeai_",
+        extra="ignore",
+    )
+
+    log_level: str = "INFO"
+
+
+CONFIG = Settings()
diff --git a/python/beeai_framework/utils/counter.py b/python/beeai_framework/utils/counter.py
new file mode 100644
index 00000000..f88c944a
--- /dev/null
+++ b/python/beeai_framework/utils/counter.py
@@ -0,0 +1,23 @@
+class RetryCounter:
+    def __init__(self, error_type: type[BaseException], max_retries: int = 0) -> None:
+        self._max_retries = max_retries
+        self.error_type = error_type
+        self.remaining = max_retries
+
+        self._error_class: type[BaseException] = error_type  # TODO: FrameworkError
+        self._last_error: BaseException | None = None
+        self._final_error: BaseException | None = None
+
+    def use(self, error: BaseException) -> None:
+        if self._final_error:
+            raise self._final_error
+
+        self._last_error = error or self._last_error
+        self.remaining -= 1
+
+        # TODO: ifFatal, isRetryable etc
+        if self.remaining < 0:
+            self._final_error = self._error_class(
+                f"Maximum number of global retries ({self._max_retries}) has been reached."
+            )
+            raise self._final_error
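A short sketch of the intended `RetryCounter` usage: every failure is passed to `use()`, and once the retry budget is exhausted the counter raises the final error itself (the `ValueError` work loop below is illustrative):

```python
from beeai_framework.utils.counter import RetryCounter

counter = RetryCounter(error_type=RuntimeError, max_retries=2)

try:
    for attempt in range(3):
        try:
            raise ValueError(f"attempt {attempt} failed")  # stand-in for real work
        except ValueError as e:
            counter.use(e)  # re-raised as RuntimeError once retries run out
except RuntimeError as final:
    print(final)  # Maximum number of global retries (2) has been reached.
```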
diff --git a/python/beeai_framework/utils/custom_logger.py b/python/beeai_framework/utils/custom_logger.py
new file mode 100644
index 00000000..150c0024
--- /dev/null
+++ b/python/beeai_framework/utils/custom_logger.py
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import logging
+import sys
+
+from pyventus import EventHandler, EventLinker
+
+from beeai_framework.backend import Role
+from beeai_framework.utils.config import CONFIG
+from beeai_framework.utils.events import MessageEvent
+
+_handler: EventHandler | None = None
+
+
+class BeeLoggerFormatter:
+    def format(self, record: logging.LogRecord) -> str:
+        if hasattr(record, "is_event_message") and record.is_event_message:
+            return logging.Formatter(
+                "{asctime} | {levelname:<8s} |{message}",
+                style="{",
+                datefmt="%Y-%m-%d %H:%M:%S",
+            ).format(record)
+        else:
+            return logging.Formatter(
+                "{asctime} | {levelname:<8s} | {name}:{funcName}:{lineno} - {message}",
+                style="{",
+                datefmt="%Y-%m-%d %H:%M:%S",
+            ).format(record)
+
+
+class BeeLogger(logging.Logger):
+    def __init__(self, name: str, level: str = CONFIG.log_level) -> None:
+        self.add_logging_level("TRACE", logging.DEBUG - 5)
+
+        super().__init__(name, level)
+
+        console_handler = logging.StreamHandler(stream=sys.stdout)
+        console_handler.setFormatter(BeeLoggerFormatter())
+
+        self.addHandler(console_handler)
+
+        global _handler
+        if _handler is None:
+            _handler = EventLinker.subscribe(MessageEvent, event_callback=self.log_message_events)
+
+    # https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility/35804945#35804945
+    def add_logging_level(self, level_name: str, level_num: int, method_name: str | None = None) -> None:
+        """
+        Comprehensively adds a new logging level to the `logging` module and the
+        currently configured logging class.
+
+        `level_name` becomes an attribute of the `logging` module with the value
+        `level_num`. `method_name` becomes a convenience method for both `logging`
+        itself and the class returned by `logging.getLoggerClass()` (usually just
+        `logging.Logger`). If `method_name` is not specified, `level_name.lower()` is
+        used.
+ + To avoid accidental clobberings of existing attributes, this method will + return without action if the level name is already an attribute of the + `logging` module or if the method name is already present + + Example + ------- + >>> add_logging_level('TRACE', logging.DEBUG - 5) + >>> logging.getLogger(__name__).setLevel("TRACE") + >>> logging.getLogger(__name__).trace('that worked') + >>> logging.trace('so did this') + >>> logging.TRACE + 5 + + """ + if not method_name: + method_name = level_name.lower() + + if hasattr(logging, level_name): + # already defined in logging module + return + if hasattr(logging, method_name): + # already defined in logging module + return + if hasattr(logging.getLoggerClass(), method_name): # pragma: no cover + # already defined in logger class + return + + # This method was inspired by the answers to Stack Overflow post + # http://stackoverflow.com/q/2183233/2988730, especially + # http://stackoverflow.com/a/13638084/2988730 + def log_for_level(self: logging.Logger, message: str, *args: int, **kwargs: int) -> None: # pragma: no cover + if self.isEnabledFor(level_num): + self._log(level_num, message, args, stacklevel=2, **kwargs) + + def log_to_root(message: str, *args: int, **kwargs: int) -> None: # pragma: no cover + logging.log(level_num, message, *args, **kwargs) + + logging.addLevelName(level_num, level_name) + setattr(logging, level_name, level_num) + setattr(logging.getLoggerClass(), method_name, log_for_level) + setattr(logging, method_name, log_to_root) + + def log_message_events(self, event: MessageEvent) -> None: + source = str.lower(event.source) + state = f" ({event.state})" if event.state else "" + icon = " ๐Ÿ‘ค" if source == str.lower(Role.USER) else " ๐Ÿค–" + self.info( + f" {str.capitalize(source)}{state}{icon}: {event.message}", + extra={"is_event_message": True}, + ) diff --git a/python/beeai_framework/utils/errors.py b/python/beeai_framework/utils/errors.py new file mode 100644 index 00000000..53b0c54a --- /dev/null +++ b/python/beeai_framework/utils/errors.py @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: Apache-2.0 + +from beeai_framework.errors import FrameworkError + + +class LoggerError(FrameworkError): + """Raised for errors caused by logging.""" + + def __init__(self, message: str = "Logger error", *, cause: Exception | None = None) -> None: + super().__init__(message, is_fatal=True, is_retryable=False, cause=cause) + + +class PromptTemplateError(FrameworkError): + """Raised for errors caused by PromptTemplate.""" + + def __init__(self, message: str = "PromptTemplate error", *, cause: Exception | None = None) -> None: + super().__init__(message, is_fatal=True, is_retryable=False, cause=cause) diff --git a/python/beeai_framework/utils/events.py b/python/beeai_framework/utils/events.py new file mode 100644 index 00000000..63e29d29 --- /dev/null +++ b/python/beeai_framework/utils/events.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: Apache-2.0 + +from dataclasses import dataclass +from typing import Literal + + +@dataclass +class MessageEvent: + source: Literal["User", "Agent"] + message: str + state: str | None = None diff --git a/python/beeai_framework/utils/models.py b/python/beeai_framework/utils/models.py new file mode 100644 index 00000000..16b9f057 --- /dev/null +++ b/python/beeai_framework/utils/models.py @@ -0,0 +1,20 @@ +from typing import TypeVar, Union + +from pydantic import BaseModel +from pydantic_core import SchemaValidator + +T = TypeVar("T", bound=BaseModel) +ModelLike = Union[T, dict] # noqa: UP007 + + +def 
to_model(cls: type[T], obj: ModelLike[T]) -> T: + return obj if isinstance(obj, cls) else cls.model_validate(obj, strict=True) + + +def to_model_optional(cls: type[T], obj: ModelLike[T] | None) -> T | None: + return None if obj is None else to_model(cls, obj) + + +def check_model(model: T) -> None: + schema_validator = SchemaValidator(schema=model.__pydantic_core_schema__) + schema_validator.validate_python(model.__dict__) diff --git a/python/beeai_framework/utils/regex.py b/python/beeai_framework/utils/regex.py new file mode 100644 index 00000000..81506451 --- /dev/null +++ b/python/beeai_framework/utils/regex.py @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: Apache-2.0 + +import re + + +def is_valid_regex(pattern: str) -> bool: + try: + re.compile(pattern) + return True + except re.error: + return False diff --git a/python/beeai_framework/utils/templates.py b/python/beeai_framework/utils/templates.py new file mode 100644 index 00000000..e9b1e6ba --- /dev/null +++ b/python/beeai_framework/utils/templates.py @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: Apache-2.0 + +from collections.abc import Callable +from typing import Any, Generic, TypedDict, TypeVar + +import chevron +from pydantic import BaseModel + +from beeai_framework.utils.errors import PromptTemplateError + + +class Prompt(TypedDict): + prompt: str | None + + +T = TypeVar("T", bound=BaseModel) + + +class PromptTemplate(Generic[T]): + def __init__(self, schema: type[T], template: str, functions: dict[str, Callable[[], str]] | None = None) -> None: + self._schema: type[T] = schema + self._template: str = template + self._functions: dict[str, Callable[[], str]] | None = functions + + def validate_input(self, input: T | dict[str, Any]) -> None: + self._schema.model_validate(input) + + def render(self, input: T | dict[str, Any]) -> str: + self.validate_input(input) + + # Make sure the data is converted to a dict + data = input.model_dump() if isinstance(input, BaseModel) else input + + # Apply function derived data + if self._functions: + for key in self._functions: + if key in data: + raise PromptTemplateError(f"Function named '{key}' clashes with input data field!") + data[key] = self._functions[key]() + + return chevron.render(template=self._template, data=data) diff --git a/python/beeai_framework/workflows/__init__.py b/python/beeai_framework/workflows/__init__.py new file mode 100644 index 00000000..6f9d4043 --- /dev/null +++ b/python/beeai_framework/workflows/__init__.py @@ -0,0 +1,4 @@ +from beeai_framework.workflows.errors import WorkflowError +from beeai_framework.workflows.workflow import Workflow, WorkflowReservedStepName + +__all__ = ["Workflow", "WorkflowError", "WorkflowReservedStepName"] diff --git a/python/beeai_framework/workflows/agent.py b/python/beeai_framework/workflows/agent.py new file mode 100644 index 00000000..ee33bd62 --- /dev/null +++ b/python/beeai_framework/workflows/agent.py @@ -0,0 +1,96 @@ +import asyncio +import random +import string +from collections.abc import Callable + +from pydantic import BaseModel, ConfigDict, Field, InstanceOf + +from beeai_framework.agents.base import BaseAgent, BaseMemory +from beeai_framework.agents.bee import BeeAgent +from beeai_framework.agents.types import ( + AgentMeta, + BeeAgentExecutionConfig, + BeeInput, + BeeRunInput, + BeeRunOutput, +) +from beeai_framework.backend.chat import ChatModel +from beeai_framework.backend.message import AssistantMessage, Message +from beeai_framework.memory import ReadOnlyMemory, UnconstrainedMemory +from beeai_framework.tools.tool 
import Tool
+from beeai_framework.workflows.workflow import Workflow, WorkflowRun
+
+AgentFactory = Callable[[ReadOnlyMemory], BaseAgent | asyncio.Future[BaseAgent]]
+
+
+class AgentFactoryInput(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+    name: str
+    llm: ChatModel
+    instructions: str | None = None
+    tools: list[InstanceOf[Tool]] | None = None
+    execution: BeeAgentExecutionConfig | None = None
+
+
+class Schema(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+    messages: list[Message] = Field(min_length=1)
+    final_answer: str | None = None
+    new_messages: list[Message] = []
+
+
+class AgentWorkflow:
+    def __init__(self, name: str = "AgentWorkflow") -> None:
+        self.workflow = Workflow(name=name, schema=Schema)
+
+    async def run(self, messages: list[Message]) -> WorkflowRun:
+        return await self.workflow.run(Schema(messages=messages))
+
+    def del_agent(self, name: str) -> "AgentWorkflow":
+        self.workflow.delete_step(name)
+        return self
+
+    def add_agent(
+        self, agent: BaseAgent | Callable[[ReadOnlyMemory], BaseAgent | asyncio.Future[BaseAgent]] | AgentFactoryInput
+    ) -> "AgentWorkflow":
+        if isinstance(agent, BaseAgent):
+
+            def factory(memory: ReadOnlyMemory) -> BaseAgent:
+                agent.memory = memory
+                return agent
+
+            return self._add(agent.meta.name, factory)
+
+        random_string = "".join(random.choice(string.ascii_letters) for _ in range(4))
+        name = agent.name if not callable(agent) else f"Agent{random_string}"
+        return self._add(name, agent if callable(agent) else self._create_factory(agent))
+
+    def _create_factory(self, input: AgentFactoryInput) -> AgentFactory:
+        def factory(memory: BaseMemory) -> BeeAgent:
+            return BeeAgent(
+                bee_input=BeeInput(
+                    llm=input.llm,
+                    tools=input.tools or [],
+                    memory=memory,
+                    # template TODO
+                    meta=AgentMeta(name=input.name, description=input.instructions or "", tools=[]),
+                    execution=input.execution,
+                )
+            )
+
+        return factory
+
+    def _add(self, name: str, factory: AgentFactory) -> "AgentWorkflow":
+        async def step(state: Schema) -> None:
+            memory = UnconstrainedMemory()
+            for message in state.messages + state.new_messages:
+                await memory.add(message)
+
+            agent: BaseAgent = factory(memory.as_read_only())
+            run_output: BeeRunOutput = await agent.run(run_input=BeeRunInput())
+            state.final_answer = run_output.result.text
+            state.new_messages.append(
+                AssistantMessage(f"Assistant Name: {name}\nAssistant Response: {run_output.result.text}")
+            )
+
+        self.workflow.add_step(name, step)
+        return self
diff --git a/python/beeai_framework/workflows/errors.py b/python/beeai_framework/workflows/errors.py
new file mode 100644
index 00000000..7c7dbb7a
--- /dev/null
+++ b/python/beeai_framework/workflows/errors.py
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from beeai_framework.errors import FrameworkError
+
+
+class WorkflowError(FrameworkError):
+    def __init__(self, message: str = "Workflow error", *, cause: Exception | None = None) -> None:
+        super().__init__(message, is_fatal=True, is_retryable=False, cause=cause)
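Before the implementation below, a small sketch of how `Workflow` step routing is meant to be driven: handlers mutate a shared pydantic state and return the next step name (or `None` to fall through to the next registered step). The `State` schema and step names are illustrative:

```python
import asyncio

from pydantic import BaseModel

from beeai_framework.workflows.workflow import Workflow


class State(BaseModel):
    counter: int = 0


def increment(state: State) -> None:
    state.counter += 1  # returning None falls through to the next step


def check(state: State) -> str:
    # Loop back to "increment" until the counter reaches 3, then finish.
    return "increment" if state.counter < 3 else Workflow.END


async def main() -> None:
    workflow = Workflow(schema=State, name="demo")
    workflow.add_step("increment", increment)
    workflow.add_step("check", check)

    run = await workflow.run(State())
    print(run.state.counter)  # 3


asyncio.run(main())
```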
diff --git a/python/beeai_framework/workflows/workflow.py b/python/beeai_framework/workflows/workflow.py
new file mode 100644
index 00000000..d2481520
--- /dev/null
+++ b/python/beeai_framework/workflows/workflow.py
@@ -0,0 +1,145 @@
+import asyncio
+import inspect
+from typing import ClassVar, Final, Generic, Literal
+
+from pydantic import BaseModel, Field
+from typing_extensions import TypeVar
+
+from beeai_framework.utils._types import MaybeAsync
+from beeai_framework.utils.models import ModelLike, check_model, to_model
+from beeai_framework.workflows.errors import WorkflowError
+
+T = TypeVar("T", bound=BaseModel)
+K = TypeVar("K", default=str)
+
+WorkflowReservedStepName = Literal["__start__", "__self__", "__prev__", "__next__", "__end__"]
+WorkflowHandler = MaybeAsync[[T], K | WorkflowReservedStepName | None]
+
+
+class WorkflowState(BaseModel, Generic[K]):
+    current: K
+    prev: K | None = None
+    next: K | None = None
+
+
+class WorkflowStepRes(BaseModel, Generic[T, K]):
+    name: K
+    state: T
+
+
+class WorkflowStepDefinition(BaseModel, Generic[T, K]):
+    handler: WorkflowHandler[T, K]
+
+
+class WorkflowRun(BaseModel, Generic[T, K]):
+    state: T
+    result: T | None = None
+    steps: list[WorkflowStepRes[T, K]] = Field(default_factory=list)
+
+
+class Workflow(Generic[T, K]):
+    START: Final[Literal["__start__"]] = "__start__"
+    SELF: Final[Literal["__self__"]] = "__self__"
+    PREV: Final[Literal["__prev__"]] = "__prev__"
+    NEXT: Final[Literal["__next__"]] = "__next__"
+    END: Final[Literal["__end__"]] = "__end__"
+
+    _RESERVED_STEP_NAMES: ClassVar = [START, SELF, PREV, NEXT, END]
+
+    def __init__(self, schema: type[T], name: str = "Workflow") -> None:
+        self._name = name
+        self._schema = schema
+        self._steps: dict[K, WorkflowStepDefinition[T, K]] = {}
+        self._start_step: K | None = None
+
+    @property
+    def steps(self) -> dict[K, WorkflowStepDefinition[T, K]]:
+        return self._steps
+
+    @property
+    def step_names(self) -> list[K]:
+        return list(self.steps.keys())
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def schema(self) -> type[T]:
+        return self._schema
+
+    @property
+    def start_step(self) -> K | None:
+        return self._start_step
+
+    def add_step(self, step_name: K, runnable: WorkflowHandler[T, K]) -> "Workflow[T, K]":
+        if len(step_name.strip()) == 0:
+            raise ValueError("Step name cannot be empty!")
+
+        if step_name in self.steps:
+            raise ValueError(f"The name '{step_name}' has already been used!")
+
+        if step_name in Workflow._RESERVED_STEP_NAMES:
+            raise ValueError(f"The name '{step_name}' is reserved and cannot be used!")
+
+        self.steps[step_name] = WorkflowStepDefinition[T, K](handler=runnable)
+
+        return self
+
+    def delete_step(self, step_name: K) -> "Workflow[T, K]":
+        if step_name not in self.steps:
+            raise WorkflowError(f"Step '{step_name}' was not found.")
+
+        del self.steps[step_name]
+
+        if self.start_step == step_name:
+            self._start_step = None
+
+        return self
+
+    def set_start(self, name: K) -> "Workflow[T, K]":
+        self._start_step = name
+        return self
+
+    async def run(self, state: ModelLike[T]) -> WorkflowRun[T, K]:
+        run = WorkflowRun[T, K](state=to_model(self._schema, state))
+        next = self._find_step(self.start_step or self.step_names[0]).current or Workflow.END
+
+        while next and next != Workflow.END:
+            step = self.steps.get(next)
+            if step is None:
+                raise WorkflowError(f"Step '{next}' was not found.")
+
+            step_res = WorkflowStepRes[T, K](name=next, state=run.state.model_copy(deep=True))
+            run.steps.append(step_res)
+
+            if inspect.iscoroutinefunction(step.handler):
+                step_next = await step.handler(step_res.state)
+            else:
+                step_next = await asyncio.to_thread(step.handler, step_res.state)
+
+            check_model(step_res.state)
+            run.state = step_res.state
+
+            # Route to next step
+            if step_next == Workflow.START:
+                next = run.steps[0].name
+            elif step_next == Workflow.PREV:
+                next = run.steps[-2].name
+            elif step_next == Workflow.SELF:
+                next = run.steps[-1].name
+            elif step_next is None or step_next
== Workflow.NEXT: + next = self._find_step(next).next or Workflow.END + else: + next = step_next + + return run + + def _find_step(self, current: K) -> WorkflowState[K]: + index = self.step_names.index(current) + return WorkflowState[K]( + prev=self.step_names[index - 1] if 0 <= index - 1 < len(self.step_names) else None, + current=self.step_names[index], + next=self.step_names[index + 1] if 0 <= index + 1 < len(self.step_names) else None, + ) diff --git a/python/cz_commitizen/__init__.py b/python/cz_commitizen/__init__.py new file mode 100644 index 00000000..69f11972 --- /dev/null +++ b/python/cz_commitizen/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2025 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from cz_commitizen.monorepo_commits import MonorepoCommitsCz + +__all__ = ["MonorepoCommitsCz"] diff --git a/python/cz_commitizen/monorepo_commits.py b/python/cz_commitizen/monorepo_commits.py new file mode 100644 index 00000000..6424092a --- /dev/null +++ b/python/cz_commitizen/monorepo_commits.py @@ -0,0 +1,73 @@ +# Copyright 2025 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from commitizen import git +from commitizen.cz.conventional_commits import ConventionalCommitsCz + +__all__ = ["MonorepoCommitsCz"] + +from commitizen.defaults import Questions + + +class MonorepoCommitsCz(ConventionalCommitsCz): + change_type_map = { # noqa: RUF012 + "feat": "Features", + "fix": "Bug Fixes", + "refactor": "Refactor", + "perf": "Performance Improvements", + } + + def changelog_message_builder_hook(self, parsed_message: dict, commit: git.GitCommit) -> dict | list | None: + changed_files = git.get_filenames_in_commit(commit.rev) or [] + + has_python_changes = any(file.startswith("python/") for file in changed_files) + if not has_python_changes: + return None + + parent_hook = super().changelog_message_builder_hook + return parent_hook(parsed_message, commit) if parent_hook else parsed_message + + def questions(self) -> Questions: + questions = super().questions() + for index, question in enumerate(questions): + if question["name"] == "scope": + questions[index] = { + "type": "list", + "name": "scope", + "message": "What is the scope of this change?", + "filter": lambda value: value or "", + "choices": [ + {"name": name or "", "value": name} + for name in [ + None, + "adapters", + "agents", + "backend", + "tools", + "cache", + "emitter", + "internals", + "logger", + "memory", + "serializer", + "infra", + "deps", + "instrumentation", + "workflows", + ] + ], + } + break + + return questions diff --git a/python/docs/CODE_OF_CONDUCT.md b/python/docs/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..c30c3a55 --- /dev/null +++ b/python/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,53 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 
+ +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at [IBM Cloud Support](https://www.ibm.com/cloud/support). +All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-1.4-4baaaa.svg)](code_of_conduct.md) + +For answers to common questions about this code of conduct, see +[www.contributor-covenant.org/faq](https://www.contributor-covenant.org/faq) diff --git a/python/docs/CONTRIBUTING.md b/python/docs/CONTRIBUTING.md new file mode 100644 index 00000000..d77a9019 --- /dev/null +++ b/python/docs/CONTRIBUTING.md @@ -0,0 +1,223 @@ +# Contributing + +BeeAI Python is an open-source project committed to bringing LLM agents to people of all backgrounds. This page describes how you can join the BeeAI community in this goal. + +## Before you start + +If you are new to BeeAI contributing, we recommend you do the following before diving into the code: + +- Read [Code of Conduct](./CODE_OF_CONDUCT.md). + +## Style and lint + +BeeAI Python uses the following tools to meet code quality standards and ensure a unified code style across the codebase: + +We use the following libs to check the Python code: + +- [Black](https://black.readthedocs.io/) - Code Formatter +- [Ruff](https://beta.ruff.rs/docs/) - Fast Python linter + +Simple [scripts for Poetry](dev_tools/scripts.py) are included to help you to review your changes and commit them. + +## Issues and pull requests + +We use GitHub pull requests to accept contributions. + +While not required, opening a new issue about the bug you're fixing or the feature you're working on before you open a pull request is important in starting a discussion with the community about your work. The issue gives us a place to talk about the idea and how we can work together to implement it in the code. It also lets the community know what you're working on, and if you need help, you can reference the issue when discussing it with other community and team members. + +If you've written some code but need help finishing it, want to get initial feedback on it before finishing it, or want to share it and discuss it prior to completing the implementation, you can open a Draft pull request and prepend the title with the [WIP] tag (for Work In Progress). 
This will indicate to reviewers that the code in the PR isn't in its final state and will change. It also means we will only merge the commit once it is finished. You or a reviewer can remove the [WIP] tag when the code is ready to be thoroughly reviewed for merging.
+
+## Choose an issue to work on
+
+BeeAI Python uses the following labels to help non-maintainers find issues best suited to their interests and experience level:
+
+- [good first issue](https://github.com/i-am-bee/beeai-framework/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) - these issues are typically the simplest available to work on, ideal for newcomers. They should already be fully scoped, with a straightforward approach outlined in the descriptions.
+- [help wanted](https://github.com/i-am-bee/beeai-framework/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) - these issues are generally more complex than good first issues. They typically cover work that core maintainers don't currently have the capacity to implement and may require more investigation/discussion. These are great options for experienced contributors looking for something more challenging.
+
+## Setting up a local development environment
+
+### Prerequisites
+
+For development, there are some tools you will need prior to cloning the code.
+
+#### Python
+
+We recommend using Python 3.11 or higher. First, ensure you have Python installed:
+
+```bash
+python --version
+```
+
+#### Poetry
+
+[Poetry](https://python-poetry.org/) is a tool for Python packaging, dependency and virtual environment management that is used to manage the development of this project. Verify that version two (V2) is installed on your machine. There are several ways to install it, including through your operating system's package manager; however, the easiest way is to use the official installer:
+
+```bash
+curl -sSL https://install.python-poetry.org | python3 -
+```
+
+You can also use `pip` or `pipx` to install Poetry.
+
+Once you have Poetry installed, you will also need to add the poetry shell plugin:
+
+```bash
+poetry self add poetry-plugin-shell
+```
+
+> [!IMPORTANT]
+> You must have Poetry >= 2.0 installed.
+
+### Clone and set up the code
+
+Follow these steps:
+
+```bash
+# Clone the repository
+git clone https://github.com/i-am-bee/beeai-framework.git
+
+# Ensure you have the pre-commit hooks installed
+pre-commit install
+
+# Use Poetry to install the project dependencies and activate a virtual environment
+poetry install
+poetry shell
+
+# Copy .env.example to .env and fill in required values
+cp .env.example .env
+```
+
+### Build the pip package
+
+#### Build the package:
+
+```bash
+poetry build
+```
+
+#### Test the Build Locally (Recommended)
+
+Note: This should be done outside an existing virtual environment or poetry shell.
+
+```bash
+# Create a virtual environment
+python -m venv test_env
+
+source test_env/bin/activate  # On Windows: test_env\Scripts\activate
+
+# Install the built package
+pip install dist/beeai-framework-0.1.0.tar.gz
+```
+
+#### Publish to TestPyPI
+
+```bash
+# Configure Poetry:
+poetry config repositories.testpypi https://test.pypi.org/legacy/
+# Publish
+poetry publish -r testpypi
+# Test the installation
+pip install --index-url https://test.pypi.org/simple/ beeai-framework
+```
+
+#### Run Linters/Formatters
+
+Ensure your changes meet code quality standards:
+
+- lint: use the following command to run Black and Ruff:
+
+```bash
+poetry run lint
+```
+
+#### Run Tests
+
+Ensure your changes pass all tests:
+
+```bash
+# Run unit tests
+pytest tests/unit
+# Run integration tests
+pytest tests/integration
+# Run E2E tests
+pytest tests/e2e
+```
+
+#### Follow Conventional Commit Messages
+
+We use [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) to structure our commit messages. Please use the following format:
+
+```
+<type>(<scope>): <subject>
+```
+
+- Type: feat, fix, chore, docs, style, refactor, perf, test, etc.
+- Scope: The area of the codebase your changes affect (optional). The allowed values are: adapters, agents, llms, tools, cache, emitter, internals, logger, memory, serializer, infra, deps, instrumentation
+- Subject: A short description of the changes (required)
+
+_Example:_
+
+```
+feat(llm): add streaming support for watsonx adapter
+
+Ref: #15
+```
+
+#### Commit:
+
+- commit: for convenience, you can use the following command to sign off your commit with `-s` and generate the commit:
+
+```bash
+poetry run commit "<type>(<scope>): <subject>"
+```
+
+By following these steps, you'll be all set to contribute to our project! If you encounter any issues during the setup process, please feel free to open an issue.
+
+## Updating examples and embedding
+
+Currently, [embedme](https://github.com/zakhenry/embedme) is used to embed code examples directly in documentation. Supported file types can be found [here](https://github.com/zakhenry/embedme?tab=readme-ov-file#multi-language).
+
+Once an example is edited, or a new one is created and referenced, running the following command will update the documentation:
+
+```bash
+poetry run embedme
+```
+
+## Legal
+
+The following sections detail important legal information that should be viewed prior to contribution.
+
+### License and Copyright
+
+Distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
+
+SPDX-License-Identifier: [Apache-2.0](https://spdx.org/licenses/Apache-2.0)
+
+If you would like to see the detailed LICENSE, click [here](/LICENSE).
+
+### Developer Certificate of Origin (DCO)
+
+We have tried to make it as easy as possible to make contributions. This applies to how we handle the legal aspects of contribution. We use the same approach - the [Developer's Certificate of Origin 1.1 (DCO)](https://developercertificate.org/) - that the Linux® Kernel [community](https://docs.kernel.org/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin) uses to manage code contributions.
+
+We ask that when submitting a patch for review, the developer must include a sign-off statement in the commit message.
If you set your `user.name` and `user.email` in your `git config` file, you can sign your commit automatically by using the following command:
+
+```bash
+git commit -s
+```
+
+If a commit has already been created but the sign-off was missed, this can be remedied with:
+
+```bash
+git commit --amend -s
+```
+
+The following example includes a `Signed-off-by:` line, which indicates that the submitter has accepted the DCO:
+
+```txt
+Signed-off-by: John Doe <john.doe@example.com>
+```
+
+We automatically verify that all commit messages contain a `Signed-off-by:` line with your email address.
+
+#### Useful tools for doing DCO signoffs
+
+While the web UI natively supports this now, there are a number of tools that make it easier for developers to manage DCO signoffs if not using the web interface.
+
+- DCO command line tool, which lets you do a single signoff for an entire repo
+- GitHub UI integrations for adding the signoff automatically
+- Browser extensions for Chrome and Firefox
diff --git a/python/docs/README.md b/python/docs/README.md
new file mode 100644
index 00000000..bedd1a9b
--- /dev/null
+++ b/python/docs/README.md
@@ -0,0 +1,142 @@
+> [!WARNING]
+> PRE-Alpha! Please reach out if you want to get involved in the discussions. All feedback is welcome.
+

+BeeAI Framework logo
+
+# BeeAI Agent Framework for Python
+
+Project Status: Alpha
+
+Python implementation of the BeeAI Agent Framework for building, deploying, and serving powerful agentic workflows at scale.
+
+The BeeAI Agent Framework for Python makes it easy to build scalable agent-based workflows with your model of choice. This framework is designed to perform robustly with [IBM Granite](https://www.ibm.com/granite?adoper=255252_0_LS1) and [Llama 3.x](https://ai.meta.com/blog/meta-llama-3-1/) models. Varying levels of support are currently available for [other LLMs using LiteLLM](https://docs.litellm.ai/docs/providers). We're actively working on optimizing its performance with these popular LLMs.
+
+Our goal is to empower Python developers to adopt the latest open-source and proprietary models with minimal changes to their current agent implementation.
+
+## Key Features
+
+- 🤖 **AI agents**: Use our powerful BeeAI agent refined for Llama 3.x and Granite 3.x, or build your own.
+- 🛠️ **Tools**: Use our built-in tools or create your own in Python.
+- 💾 **Memory**: Multiple strategies to optimize token spend.
+- ... more on our Roadmap
+
+## Getting Started
+
+### Installation
+
+```bash
+pip install beeai-framework
+```
+
+### Quick Example
+
+```py
+from beeai_framework import BeeAgent, LLM
+
+agent = BeeAgent(llm=LLM())
+
+agent.run("What is the capital of Massachusetts")
+```
+
+> [!NOTE]
+> To run this example, ensure you have [ollama](https://ollama.com) installed with the [llama3.1](https://ollama.com/library/llama3.1) model downloaded.
+
+To run other examples, you can use `python examples/[example_name].py`:
+
+```bash
+python examples/basic.py
+```
+
+## Local Development
+
+Please check our [contributing guide](./CONTRIBUTING.md).
+
+### Prerequisites
+
+For development, there are some tools you will need prior to cloning the code.
+
+#### Poetry
+
+[Poetry](https://python-poetry.org/) is a tool for Python packaging, dependency and virtual environment management that is used to manage the development of this project. Verify that version 2 is installed on your machine. There are several ways to install it, including through your operating system's package manager; however, the easiest way is to use the official installer:
+
+```bash
+curl -sSL https://install.python-poetry.org | python3 -
+```
+
+You can also use `pip` or `pipx` to install Poetry.
+
+Once you have Poetry installed, you will also need to add the poetry shell plugin:
+
+```bash
+poetry self add poetry-plugin-shell
+```
+
+> [!IMPORTANT]
+> You must have Poetry >= 2.0 installed.
+
+### Clone and set up the code
+
+Follow these steps:
+
+```bash
+# Clone the repository
+git clone https://github.com/i-am-bee/beeai-framework
+
+cd python
+
+# Use Poetry to install the project dependencies and activate a virtual environment
+poetry install
+poetry shell
+
+# Copy .env.example to .env and fill in required values
+cp .env.example .env
+```
+
+### Build the pip package
+
+#### Build the package:
+
+```bash
+poetry build
+```
+
+## Modules
+
+The package provides several modules:
+
+| Module   | Description                                            |
+| -------- | ------------------------------------------------------ |
+| `agents` | Base classes defining the common interface for agents  |
+| `llms`   | Base classes for text inference (standard or chat)     |
+| `tools`  | Tools that an agent can use                            |
+
+## Roadmap
+
+- 👩‍💻 **Code interpreter**: Run code safely in a sandbox container.
+- ⏸️ **Serialization**: Handle complex agentic workflows and easily pause/resume them without losing state.
+- 🔍 **Instrumentation**: Full visibility of your agent's inner workings.
+- 🎛️ **Production-level** control with caching and error handling.
+- 🔌 **API**: OpenAI-compatible Assistants API integration.
+- BeeAI agent performance optimization with additional models
+- Examples, tutorials, and comprehensive documentation
+- Improvements to building custom agents
+- Multi-agent orchestration
+- Feature parity with TypeScript version
+
+## Contributing
+
+The BeeAI Agent Framework for Python is an open-source project and we ❤️ contributions. Please check our [contribution guidelines](./CONTRIBUTING.md) before getting started.
+
+### Reporting Issues
+
+We use [GitHub Issues](https://github.com/i-am-bee/beeai-framework/issues) to track public bugs. Please check existing issues before filing new ones.
+
+### Code of Conduct
+
+This project adheres to our [Code of Conduct](./CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code.
+
+## Legal Notice
+
+Initial content in these repositories including code has been provided by IBM under the associated open source software license and IBM is under no obligation to provide enhancements, updates, or support. IBM developers produced this code as an open source project (not as an IBM product), and IBM makes no assertions as to the level of quality nor security, and will not be maintaining this code going forward.
diff --git a/python/docs/SECURITY.md b/python/docs/SECURITY.md
new file mode 100644
index 00000000..656f559d
--- /dev/null
+++ b/python/docs/SECURITY.md
@@ -0,0 +1,15 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+To report vulnerabilities, you can privately report a potential security issue
+via the GitHub security vulnerabilities feature. This can be done here:
+
+https://github.com/i-am-bee/beeai-framework/security/advisories
+
+Please do **not** open a public issue about a potential security vulnerability.
+
+You can find more details on the security vulnerability feature in the GitHub
+documentation here:
+
+https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability
diff --git a/python/docs/_sidebar.md b/python/docs/_sidebar.md
new file mode 100644
index 00000000..70620f2e
--- /dev/null
+++ b/python/docs/_sidebar.md
@@ -0,0 +1,25 @@
+- Getting started
+
+  - [Home](/)
+  - [Overview](overview.md)
+  - [Examples](examples.md)
+  - [Tutorials](tutorials.md)
+
+- Modules
+
+  - [Agents](agents.md)
+  - [Workflows](workflows.md)
+  - [Backend](backend.md)
+  - [Templates](templates.md)
+  - [Memory](memory.md)
+  - [Emitter](emitter.md)
+  - [Serialization](serialization.md)
+  - [Tools](tools.md)
+  - [Cache](cache.md)
+  - [Logger](logger.md)
+  - [Version](version.md)
+  - [Error Handling](errors.md)
+
+- Others
+
+  - [Integrations](integrations.md)
diff --git a/python/docs/agents.md b/python/docs/agents.md
new file mode 100644
index 00000000..efe0b151
--- /dev/null
+++ b/python/docs/agents.md
@@ -0,0 +1,94 @@
+# Agents
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+AI agents built on large language models control the path to solving a complex problem. They can typically act on feedback to refine their plan of action, a capability that can improve performance and help them accomplish more sophisticated tasks.
+
+We recommend reading the [following article](https://research.ibm.com/blog/what-are-ai-agents-llm) to learn more.
+
+## Implementation in BeeAI Framework
+
+An agent can be thought of as a program powered by an LLM. The LLM generates structured output that is then processed by your program.
+
+Your program then decides what to do next based on the retrieved content. It may leverage a tool, reflect, or produce a final answer.
+Before the agent determines the final answer, it performs a series of `steps`. A step might be calling an LLM, parsing the LLM output, or calling a tool.
+
+Steps are grouped into an `iteration`, and every update (either complete or partial) is emitted to the user.
+
+### Bee Agent
+
+Our Bee Agent is based on the `ReAct` ([Reason and Act](https://arxiv.org/abs/2210.03629)) approach.
+
+Hence, in each iteration the agent produces one of the following outputs.
+
+For the sake of simplicity, imagine that the input prompt is "What is the current weather in Las Vegas?"
+
+First iteration:
+
+```
+thought: I need to retrieve the current weather in Las Vegas. I can use the OpenMeteo function to get the current weather forecast for a location.
+tool_name: OpenMeteo
+tool_input: {"location": {"name": "Las Vegas"}, "start_date": "2024-10-17", "end_date": "2024-10-17", "temperature_unit": "celsius"}
+```
+
+> [!NOTE]
+>
+> The agent emitted 3 complete updates in the following order (`thought`, `tool_name`, `tool_input`) and many partial updates in the same order.
+> A partial update means that new tokens are being added to the iteration. Updates always arrive in strict order: you first get many partial updates for `thought`, followed by a final update for `thought`, after which no further updates arrive for that key.
+
+Second iteration:
+
+```
+thought: I have the current weather in Las Vegas in Celsius.
+final_answer: The current weather in Las Vegas is 20.5°C with an apparent temperature of 18.3°C.
+```
+
+For more complex tasks, the agent may perform many more iterations.
+
+In the following example, we will transform the knowledge gained into code.
+
+```txt
+Coming soon
+```
+
+### Behaviour
+
+You can alter the agent's behavior in the following ways.
+
+#### Setting execution policy
+
+```txt
+Coming soon
+```
+
+> [!NOTE]
+>
+> The default is zero retries and no timeout.
+
+#### Overriding prompt templates
+
+The agent uses the following prompt templates.
+
+1. **System Prompt**
+
+2. **User Prompt** (to reformat the user's prompt)
+
+3. **User Empty Prompt**
+
+4. **Tool Error**
+
+5. **Tool Input Error** (validation error)
+
+6. **Tool No Result Error**
+
+7. **Tool Not Found Error**
+
+8. **Invalid Schema Error** (output from LLM cannot be processed)
+
+Please refer to the [following example](/examples/agents/bee_advanced.py) to see how to modify them.
+
+## Creating your own agent
+
+To create your own agent, you must implement the agent's base class (`BaseAgent`).
+
+Python example coming soon. /examples/agents/custom_agent.py TODO
diff --git a/python/docs/assets/Bee_Dark.svg b/python/docs/assets/Bee_Dark.svg
new file mode 100644
index 00000000..3806d801
--- /dev/null
+++ b/python/docs/assets/Bee_Dark.svg
@@ -0,0 +1,16 @@
+(SVG markup omitted — BeeAI logo asset; markup not recoverable from extraction)
diff --git a/python/docs/backend.md b/python/docs/backend.md
new file mode 100644
index 00000000..1efc00c3
--- /dev/null
+++ b/python/docs/backend.md
@@ -0,0 +1,109 @@
+# Backend
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+> [!TIP]
+>
+> Location for concrete implementations within the framework: `beeai/adapters/provider/backend`.
+>
+> Location for base abstraction within the framework: `beeai/backend`.
+
+The backend module is an umbrella module that encapsulates a unified way to work with the following functionalities:
+
+- Chat Models (via the `ChatModel` class)
+- Embedding Models (via the `EmbeddingModel` class)
+- Audio Models (coming soon)
+- Image Models (coming soon)
+
+## Providers (implementations)
+
+The following table depicts supported providers.
+
+| Name | Chat | Embedding | Dependency | Environment Variables |
+| ---------------- | :--: | :-------: | ------------------------ | :----------------------------------------------------------------------------------------------------------------------------- |
+| `Ollama` | ✅ | ✅ | `ollama-ai-provider` | OLLAMA_CHAT_MODEL<br>OLLAMA_EMBEDDING_MODEL<br>OLLAMA_BASE_URL |
+| `Watsonx` | ✅ | ✅ | `@ibm-cloud/watsonx-ai` | WATSONX_CHAT_MODEL<br>WATSONX_EMBEDDING_MODEL<br>WATSONX_API_KEY<br>WATSONX_PROJECT_ID<br>WATSONX_SPACE_ID<br>WATSONX_VERSION<br>WATSONX_REGION |
+
+> [!TIP]
+>
+> If you don't see your provider, raise an issue [here](https://github.com/i-am-bee/bee-py/discussions). Meanwhile, you can use the [Ollama adapter](/examples/backend/providers/ollama.py).
+
+### Initialization
+
+```txt
+Coming soon
+```
+
+All provider examples can be found in [examples/backend/providers](/examples/backend/providers).
+
+## Chat Model
+
+The `ChatModel` class represents a Chat Large Language Model and can be instantiated in one of the following ways.
+
+```txt
+Coming soon
+```
+
+or you can always create the concrete provider's chat model directly
+
+```txt
+Coming soon
+```
+
+### Configuration
+
+```txt
+Coming soon
+```
+
+### Generation
+
+```txt
+Coming soon
+```
+
+> [!NOTE]
+>
+> Execution parameters (those passed to `model.create({...})`) take precedence over ones defined via `config`.
+
+### Stream
+
+```txt
+Coming soon
+```
+
+### Structured Generation
+
+```py
+```
+
+Source: /examples/backend/structured.py TODO
+
+### Tool Calling
+
+```py
+```
+
+Source: /examples/backend/toolCalling.py TODO
+
+## Embedding Model
+
+The `EmbeddingModel` class represents an Embedding Model and can be instantiated in one of the following ways.
+
+```
+Coming soon
+```
+
+or you can always create the concrete provider's embedding model directly
+
+```
+Coming soon
+```
+
+### Usage
+
+```txt
+Coming soon
+```
diff --git a/python/docs/cache.md b/python/docs/cache.md
new file mode 100644
index 00000000..f4a6fa81
--- /dev/null
+++ b/python/docs/cache.md
@@ -0,0 +1,102 @@
+# Cache
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+Caching is a process used to temporarily store copies of data or computations in a cache (a storage location) to facilitate faster access upon future requests. The primary purpose of caching is to improve the efficiency and performance of systems by reducing the need to repeatedly fetch or compute the same data from a slower or more resource-intensive source.
+
+## Usage
+
+### Capabilities showcase
+
+```py
+```
+
+_Source: /examples/cache/unconstrainedCache.py TODO
+
+### Caching function output + intermediate steps
+
+```py
+```
+
+_Source: /examples/cache/unconstrainedCacheFunction.py TODO
+
+### Usage with tools
+
+```py
+```
+
+_Source: /examples/cache/toolCache.py TODO
+
+### Usage with LLMs
+
+```py
+```
+
+_Source: /examples/cache/llmCache.py TODO
+
+## Cache types
+
+The framework provides multiple out-of-the-box cache implementations.
+
+### UnconstrainedCache
+
+```py
+```
+
+### SlidingCache
+
+```py
+```
+
+_Source: /examples/cache/slidingCache.py TODO
+
+### FileCache
+
+```py
+```
+
+_Source: /examples/cache/fileCache.py TODO
+
+#### Using a custom provider
+
+```py
+```
+
+_Source: /examples/cache/fileCacheCustomProvider.py TODO
+
+### NullCache
+
+The special type of cache is `NullCache`, which implements the `BaseCache` interface but does nothing.
+
+The reason for implementing it is to enable the [Null object pattern](https://en.wikipedia.org/wiki/Null_object_pattern).
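+
+The Python port does not ship a `NullCache` implementation yet; the sketch below only illustrates the idea. The async `get`/`set`/`has`/`delete`/`clear`/`size` surface is assumed here for illustration and is not the framework's confirmed `BaseCache` API:
+
+```py
+from typing import Any, Generic, TypeVar
+
+T = TypeVar("T")
+
+
+class NullCache(Generic[T]):
+    """A cache that fulfils the caching contract but never stores anything."""
+
+    enabled: bool = False  # hypothetical flag: callers can see caching is off
+
+    async def set(self, key: str, value: T) -> None:
+        pass  # intentionally a no-op
+
+    async def get(self, key: str) -> T | None:
+        return None  # always a miss
+
+    async def has(self, key: str) -> bool:
+        return False
+
+    async def delete(self, key: str) -> bool:
+        return False
+
+    async def clear(self) -> None:
+        pass
+
+    async def size(self) -> int:
+        return 0
+```
+
+Because it honors the same contract as a real cache, a `NullCache` can be passed anywhere a cache is expected, switching caching off without sprinkling `if cache is not None` checks through the calling code.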
+
+### @Cache (decorator cache)
+
+```py
+```
+
+_Source: /examples/cache/decoratorCache.py TODO
+
+**Complex example**
+
+```py
+```
+
+_Source: /examples/cache/decoratorCacheComplex.py TODO
+
+### CacheFn
+
+```py
+```
+
+_Source: /examples/cache/cacheFn.py TODO
+
+## Creating a custom cache provider
+
+```py
+```
+
+_Source: /examples/cache/custom.py TODO
diff --git a/python/docs/emitter.md b/python/docs/emitter.md
new file mode 100644
index 00000000..bd5c5195
--- /dev/null
+++ b/python/docs/emitter.md
@@ -0,0 +1,57 @@
+# Emitter (Observability)
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+> Location within the framework `bee-py/emitter`.
+
+An emitter is a core functionality of the framework that allows you to see what is happening under the hood.
+
+## Standalone usage
+
+The following examples demonstrate how the [`Emitter`](/beeai/utils/events.py) concept works.
+
+### Basic Usage
+
+```py
+```
+
+_Source: /examples/emitter/base.py TODO
+
+> [!NOTE]
+>
+> You can create your own emitter by instantiating the `Emitter` class, but typically it's better to use or fork the root one.
+
+### Advanced
+
+```py
+```
+
+_Source: /examples/emitter/advanced.py TODO
+
+### Event Matching
+
+```py
+```
+
+_Source: /examples/emitter/matchers.py TODO
+
+### Event Piping
+
+```py
+```
+
+_Source: /examples/emitter/piping.py TODO
+
+## Framework Usage
+
+Typically, you consume out-of-the-box modules that use the `Emitter` concept on your behalf.
+
+## Agent usage
+
+```py
+```
+
+_Source: /examples/emitter/agentMatchers.py TODO
diff --git a/python/docs/errors.md b/python/docs/errors.md
new file mode 100644
index 00000000..a74b1496
--- /dev/null
+++ b/python/docs/errors.md
@@ -0,0 +1,61 @@
+# Error Handling
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+Error handling is a critical part of any Python application, especially when dealing with asynchronous operations, various error types, and error propagation across multiple layers. In the BeeAI Framework, we provide a robust and consistent error-handling structure that ensures reliability and ease of debugging.
+
+## The `FrameworkError` class
+
+All errors thrown within the BeeAI Framework extend from the base `FrameworkError` class.
+
+Benefits of using `FrameworkError`:
+
+- **Multiple Error Handling**: Supports handling multiple errors simultaneously, which is particularly useful in asynchronous or concurrent operations.
+- **Preserved Error Chains**: Retains the full history of errors, giving developers greater context for debugging.
+- **Consistent Structure**: All errors across the framework share a uniform structure, simplifying error tracking and management.
+- **Native Support**: Built on native Python functionality, avoiding the need for additional dependencies while leveraging familiar mechanisms.
+- **Utility Functions**: Includes methods for formatting error stack traces and explanations, making them suitable for use with LLMs and other external tools.
+
+This structure ensures that users can trace the complete error history while clearly identifying any errors originating from the BeeAI Framework.
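+
+The linked example file is still TODO. Until it lands, here is a rough sketch of the intended usage pattern — the plain-message constructor and the `explain()` helper are assumptions based on the descriptions above, not a confirmed API:
+
+```py
+from beeai_framework.errors import FrameworkError
+
+
+def run() -> None:
+    try:
+        # Simulate a low-level failure somewhere deep in your program
+        raise ValueError("invalid input from a dependency")
+    except ValueError as inner:
+        # Wrap it while preserving the original cause chain
+        raise FrameworkError("Processing the request has failed.") from inner
+
+
+try:
+    run()
+except FrameworkError as error:
+    print(error)            # the framework-level message
+    print(error.__cause__)  # the original ValueError is preserved
+    # Assumed utility: render the whole error chain for logs or LLM consumption
+    print(error.explain())
+```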
+
+```py
+```
+
+_Source: /examples/errors/base.py TODO
+
+## Specialized Error Classes
+
+The BeeAI Framework extends `FrameworkError` to create specialized error classes for different components. This ensures that each part of the framework has clear and well-defined error types, improving debugging and error handling.
+
+> [!TIP]
+>
+> Casting an unknown error to a `FrameworkError` can be done by calling the `FrameworkError.ensure` static method ([example TODO]()).
+
+### Tools
+
+When a tool encounters an error, it throws a `ToolError`, which extends `FrameworkError`. If input validation fails, a `ToolInputValidationError` (which extends `ToolError`) is thrown.
+
+```py
+```
+
+_Source: /examples/errors/tool.py TODO
+
+> [!TIP]
+>
+> If you throw a `ToolError` intentionally in a custom tool, the framework will not apply any additional "wrapper" errors, preserving the original error context.
+
+### Agents
+
+Agents throw the `AgentError` class, which extends the `FrameworkError` class.
+
+### Prompt Templates
+
+Prompt templates throw the `PromptTemplateError` class, which extends the `FrameworkError` class.
+
+### Loggers
+
+Loggers throw the `LoggerError` class, which extends the `FrameworkError` class.
+
+### Serializers
+
+Serializers throw the `SerializerError` class, which extends the `FrameworkError` class.
diff --git a/python/docs/examples.md b/python/docs/examples.md
new file mode 100644
index 00000000..70622343
--- /dev/null
+++ b/python/docs/examples.md
@@ -0,0 +1,145 @@
+# BeeAI Framework Examples
+
+This repository contains examples demonstrating the usage of the BeeAI Framework, a toolkit for building AI agents and applications.
+
+## Table of Contents
+
+1. [Agents](#agents)
+2. [Workflows](#workflows)
+3. [Cache](#cache)
+4. [Errors](#errors)
+5. [Helpers](#helpers)
+6. [LLMs (Language Models)](#llms-language-models)
+7. [Logger](#logger)
+8. [Memory](#memory)
+9. [Serialization](#serialization)
+10. [Templates](#templates)
+11. [Tools](#tools)
+
+## Agents
+
+- [`bee.py`](/examples/agents/bee.py): Basic Bee Agent implementation
+- [`bee_advanced.py`](/examples/agents/bee_advanced.py): Advanced Bee Agent with custom configurations
+- [`bee_reusable.py`](/examples/agents/bee_reusable.py): Demonstration of serializing and reusing Bee Agents
+- [`custom_agent.py`](/examples/agents/custom_agent.py): Example of creating a custom agent
+- [`granite_bee.py`](/examples/agents/granite/granite_bee.py): Basic Bee Agent using an IBM Granite LLM
+- [`granite_wiki_bee.py`](/examples/agents/granite/granite_wiki_bee.py): Advanced Bee Agent using an IBM Granite LLM with Wikipedia retrieval
+- [`simple.py`](/examples/agents/simple.py): Simple agent implementation
+- [`sql.py`](/examples/agents/sql.py): Agent for SQL-related tasks
+
+## Workflows
+
+- [`simple.py`](/examples/workflows/simple.py): Introduction to workflows
+- [`nesting.py`](/examples/workflows/nesting.py): How to nest workflows
+- [`agent.py`](/examples/workflows/agent.py): Using workflows to interconnect two agents with a critique step
+- [`multiAgents.py`](/examples/workflows/multiAgents.py): Multi-step sequential agentic workflow
+- [`contentCreator.py`](/examples/workflows/contentCreator.py): Multi-step workflow for writing blog posts
+ +## Cache + +- [`cacheFn.py`](/examples/cache/cacheFn.py): Function caching example +- [`custom.py`](/examples/cache/custom.py): Custom cache implementation +- [`decoratorCache.py`](/examples/cache/decoratorCache.py): Cache decorator usage +- [`decoratorCacheComplex.py`](/examples/cache/decoratorCacheComplex.py): Complex cache decorator example +- [`fileCache.py`](/examples/cache/fileCache.py): File-based caching +- [`fileCacheCustomProvider.py`](/examples/cache/fileCacheCustomProvider.py): Custom provider for file cache +- [`slidingCache.py`](/examples/cache/slidingCache.py): Sliding window cache implementation +- [`toolCache.py`](/examples/cache/toolCache.py): Caching for tools +- [`unconstrainedCache.py`](/examples/cache/unconstrainedCache.py): Unconstrained cache example +- [`unconstrainedCacheFunction.py`](/examples/cache/unconstrainedCacheFunction.py): Function using unconstrained cache + +## Errors + +- [`base.py`](/examples/errors/base.py): Basic error handling +- [`cast.py`](/examples/errors/cast.py): Error casting example +- [`tool.py`](/examples/errors/tool.py): Tool-specific error handling + +## Helpers + +- [`io.py`](/examples/helpers/io.py): Input/Output helpers +- [`setup.py`](/examples/helpers/setup.py): Setup utilities + +## LLMs (Language Models) + +- [`chat.py`](/examples/backend/chat.py): Chat-based language model usage +- [`chatCallback.py`](/examples/backend/chatStream.py): Callbacks for chat models +- [`structured.py`](/examples/backend/structured.py): Structured output from language models + +### LLM Providers + +- [`ollama.py`](/examples/backend/providers/ollama.py): Ollama model usage +- [`watsonx.py`](/examples/backend/providers/watsonx.py): Watsonx integration + +## Logger + +- [`agent.py`](/examples/logger/agent.py): Agent-specific logging +- [`base.py`](/examples/logger/base.py): Basic logging setup +- [`pino.py`](/examples/logger/pino.py): Pino logger integration + +## Memory + +- [`agentMemory.py`](/examples/memory/agentMemory.py): Memory management for agents +- [`custom.py`](/examples/memory/custom.py): Custom memory implementation +- [`llmMemory.py`](/examples/memory/llmMemory.py): Memory for language models +- [`slidingMemory.py`](/examples/memory/slidingMemory.py): Sliding window memory +- [`summarizeMemory.py`](/examples/memory/summarizeMemory.py): Memory with summarization +- [`tokenMemory.py`](/examples/memory/tokenMemory.py): Token-based memory +- [`unconstrainedMemory.py`](/examples/memory/unconstrainedMemory.py): Unconstrained memory example + +## Serialization + +- [`base.py`](/examples/serialization/base.py): Basic serialization +- [`context.py`](/examples/serialization/context.py): Context serialization +- [`customExternal.py`](/examples/serialization/customExternal.py): Custom external serialization +- [`customInternal.py`](/examples/serialization/customInternal.py): Custom internal serialization +- [`memory.py`](/examples/serialization/memory.py): Memory serialization + +## Templates + +- [`arrays.py`](/examples/templates/arrays.py): Array-based templates +- [`forking.py`](/examples/templates/forking.py): Template forking +- [`functions.py`](/examples/templates/functions.py): Function-based templates +- [`objects.py`](/examples/templates/objects.py): Object-based templates +- [`primitives.py`](/examples/templates/primitives.py): Primitive data type templates + +## Tools + +- [`advanced.py`](/examples/tools/advanced.py): Advanced tool usage +- [`agent.py`](/examples/tools/agent.py): Agent-specific tools +- 
[`base.py`](/examples/tools/base.py): Basic tool implementation
+- [`mcp.py`](/examples/tools/mcp.py): MCP tool usage
+
+### Custom Tools
+
+- [`base.py`](/examples/tools/custom/base.py): Custom tool base implementation
+- [`dynamic.py`](/examples/tools/custom/dynamic.py): Dynamic tool creation
+- [`openLibrary.py`](/examples/tools/custom/openLibrary.py): OpenLibrary API tool
+- [`python.py`](/examples/tools/custom/python.py): Python-based custom tool
+
+- [`langchain.py`](/examples/tools/langchain.py): LangChain tool integration
+
+## Usage
+
+To run these examples, make sure you have the BeeAI Framework cloned and properly configured. Each file demonstrates a specific feature or use case of the framework. You can run individual examples using Python.
+
+1. Clone the repository:
+   ```bash
+   git clone git@github.com:i-am-bee/beeai-framework
+   ```
+2. Install dependencies:
+   ```bash
+   pip install .
+   ```
+3. Create a `.env` file (from `.env.template`) and fill in any missing values.
+
+4. To run an arbitrary example, use the following command:
+
+   ```bash
+   python examples/path/to/example.py
+   ```
+
+For more detailed information on the BeeAI Framework, please refer to the [documentation](/docs/README.md).
+
+> [!TIP]
+>
+> To run examples that use Ollama, be sure that you have installed [Ollama](https://ollama.com) with the [llama3.1](https://ollama.com/library/llama3.1) model downloaded.
diff --git a/python/docs/index.html b/python/docs/index.html
new file mode 100644
index 00000000..d8b1791d
--- /dev/null
+++ b/python/docs/index.html
@@ -0,0 +1,97 @@
+(HTML markup omitted — documentation shell page titled "BeeAI Framework Docs"; markup not recoverable from extraction)
diff --git a/python/docs/instrumentation.md b/python/docs/instrumentation.md
new file mode 100644
index 00000000..75cfbcc9
--- /dev/null
+++ b/python/docs/instrumentation.md
@@ -0,0 +1,74 @@
+# OpenTelemetry Instrumentation in Bee-Py
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+This document provides an overview of the OpenTelemetry instrumentation setup in Bee-Py.
+The implementation is designed to [create telemetry spans](https://opentelemetry.io/docs/languages/python/instrumentation/#creating-spans) for observability when instrumentation is enabled.
+
+## Overview
+
+OpenTelemetry instrumentation allows you to collect telemetry data, such as traces and metrics, to monitor the performance of your services.
+This setup involves creating middleware to handle instrumentation automatically when the `INSTRUMENTATION_ENABLED` flag is active.
+
+## Setting up OpenTelemetry
+
+Follow the official OpenTelemetry [Python Getting Started Guide](https://opentelemetry.io/docs/languages/python/getting-started/) to initialize and configure OpenTelemetry in your application.
+
+## Instrumentation Configuration
+
+### Environment Variable
+
+Use the environment variable `BEE_FRAMEWORK_INSTRUMENTATION_ENABLED` to enable or disable instrumentation.
+
+```bash
+# Enable instrumentation
+export BEE_FRAMEWORK_INSTRUMENTATION_ENABLED=true
+# Ignore sensitive keys from collected events data
+export INSTRUMENTATION_IGNORED_KEYS="apiToken,accessToken"
+```
+
+If `BEE_FRAMEWORK_INSTRUMENTATION_ENABLED` is false or unset, the framework will run without instrumentation.
+
+## Creating Custom Spans
+
+You can manually create spans during the `run` process to track specific parts of the execution. This is useful for adding custom telemetry to enhance observability.
+
+Example of creating a span:
+
+```txt
+Coming soon
+```
+
+## Verifying Instrumentation
+
+Once you have enabled the instrumentation, you can view telemetry data using any [compatible OpenTelemetry backend](https://opentelemetry.io/docs/languages/js/exporters/), such as [Jaeger](https://www.jaegertracing.io/), [Zipkin](https://zipkin.io/), [Prometheus](https://prometheus.io/docs/prometheus/latest/feature_flags/#otlp-receiver), etc.
+Ensure your OpenTelemetry setup is properly configured to export trace data to your chosen backend.
+
+### Agent instrumentation
+
+Running the instrumented agent file:
+
+```bash
+TODO
+```
+
+### LLM instrumentation
+
+Running the LLM instrumentation file:
+
+```bash
+TODO
+```
+
+### Tool instrumentation
+
+Running the tool instrumentation file:
+
+```bash
+TODO
+```
+
+## Conclusion
+
+This setup provides basic OpenTelemetry instrumentation with the flexibility to enable or disable it as needed.
+By creating custom spans and using `createTelemetryMiddleware`, you can capture detailed telemetry for better observability and performance insights.
diff --git a/python/docs/integrations.md b/python/docs/integrations.md
new file mode 100644
index 00000000..917fce34
--- /dev/null
+++ b/python/docs/integrations.md
@@ -0,0 +1,13 @@
+# Integrations
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+BeeAI Framework is an open-source framework for building, deploying, and serving powerful multi-agent workflows at scale, which includes integrating with other agent frameworks.
+
+## LangGraph
+
+```py
+```
+
+_Source: /examples/integrations/langgraph.py TODO
diff --git a/python/docs/logger.md b/python/docs/logger.md
new file mode 100644
index 00000000..67c4a5a4
--- /dev/null
+++ b/python/docs/logger.md
@@ -0,0 +1,27 @@
+# Logger
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+> [!TIP]
+>
+> Location within the framework `beeai/utils`.
+
+The Logger is a key component designed to record and track events, errors, and other important actions during an application's execution. It provides valuable insights into the application's behavior, performance, and potential issues, helping developers and system administrators troubleshoot and monitor the system effectively.
+
+In the BeeAI Framework, the [Logger TODO]() class is an abstraction built on top of the built-in Python logger.
+
+## Basic Usage
+
+```py
+```
+
+_Source: /examples/logger/base.py TODO
+
+## Usage with Agents
+
+The [Logger](/beeai/logger/logger.py) seamlessly integrates with agents in the framework. Below is an example that demonstrates how logging can be used in conjunction with agents and event emitters.
+
+```py
+```
+
+_Source: /examples/logger/agent.py TODO
diff --git a/python/docs/memory.md b/python/docs/memory.md
new file mode 100644
index 00000000..d23c807f
--- /dev/null
+++ b/python/docs/memory.md
@@ -0,0 +1,379 @@
+# Memory
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+> [!TIP]
+>
+> Location within the framework `beeai/memory`.
+
+Memory in the context of an agent refers to the system's capability to store, recall, and utilize information from past interactions. This enables the agent to maintain context over time, improve its responses based on previous exchanges, and provide a more personalized experience.
+
+## Usage
+
+### Capabilities showcase
+
+```py
+```
+
+_Source: /examples/memory/base.py TODO
+
+### Usage with LLMs
+
+```py
+```
+
+_Source: /examples/memory/llmMemory.py TODO
+
+> [!TIP]
+>
+> Memory for non-chat LLMs works exactly the same way.
+
+### Usage with agents
+
+<!-- embedme examples/memory/agentMemory.py -->
+
+```py
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from beeai_framework.agents.bee.agent import BeeAgent
+from beeai_framework.agents.types import BeeInput, BeeRunInput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.backend.message import AssistantMessage, UserMessage
+from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory
+
+# Initialize the memory and LLM
+memory = UnconstrainedMemory()
+
+
+async def create_agent() -> BeeAgent:
+    llm = await ChatModel.from_name("ollama:granite3.1-dense:8b")
+
+    # Initialize the agent
+    agent = BeeAgent(BeeInput(llm=llm, memory=memory, tools=[]))
+
+    return agent
+
+
+async def main() -> None:
+    try:
+        # Create user message
+        user_input = "Hello world!"
+ user_message = UserMessage(user_input) + + # Await adding user message to memory + await memory.add(user_message) + print("Added user message to memory") + + # Create agent + agent = await create_agent() + + response = await agent.run( + BeeRunInput( + prompt=user_input, + options={ + "execution": { + "max_retries_per_step": 3, + "total_max_retries": 10, + "max_iterations": 20, + } + }, + ) + ) + print(f"Received response: {response}") + + # Create and store assistant's response + assistant_message = AssistantMessage(response.result.text) + + # Await adding assistant message to memory + await memory.add(assistant_message) + print("Added assistant message to memory") + + # Print results + print(f"\nMessages in memory: {len(agent.memory.messages)}") + + if len(agent.memory.messages) >= 1: + user_msg = agent.memory.messages[0] + print(f"User: {user_msg.text}") + + if len(agent.memory.messages) >= 2: + agent_msg = agent.memory.messages[1] + print(f"Agent: {agent_msg.text}") + else: + print("No agent message found in memory") + + except Exception as e: + print(f"An error occurred: {e!s}") + import traceback + + print(traceback.format_exc()) + + +if __name__ == "__main__": + asyncio.run(main()) + +``` + +_Source: [examples/memory/agentMemory.py](/examples/memory/agentMemory.py)_ + +> [!TIP] +> +> If your memory already contains the user message, run the agent with `prompt: null`. + +> [!NOTE] +> +> Bee Agent internally uses `TokenMemory` to store intermediate steps for a given run. + +> [!NOTE] +> +> Agent typically works with a memory similar to what was just shown. + +## Memory types + +The framework provides multiple out-of-the-box memory implementations. + +### UnconstrainedMemory + +Unlimited in size. + + + +```py +# SPDX-License-Identifier: Apache-2.0 + +import asyncio + +from beeai_framework.backend import Message, Role +from beeai_framework.memory import UnconstrainedMemory + + +async def main() -> None: + try: + # Create memory instance + memory = UnconstrainedMemory() + + # Add a message + await memory.add(Message.of({"role": Role.USER, "text": "Hello world!"})) + + # Print results + print(f"Is Empty: {memory.is_empty()}") # Should print: False + print(f"Message Count: {len(memory.messages)}") # Should print: 1 + + print("\nMessages:") + for msg in memory.messages: + print(f"{msg.role}: {msg.text}") + + except Exception as e: + print(f"An error occurred: {e!s}") + import traceback + + print(traceback.format_exc()) + + +if __name__ == "__main__": + asyncio.run(main()) + +``` + +_Source: [examples/memory/unconstrainedMemory.py](/examples/memory/unconstrainedMemory.py)_ + +### SlidingMemory + +Keeps last `k` entries in the memory. The oldest ones are deleted (unless specified otherwise). 
+ + + +```py +# SPDX-License-Identifier: Apache-2.0 + +import asyncio + +from beeai_framework.backend import Message, Role +from beeai_framework.memory.sliding_memory import SlidingMemory, SlidingMemoryConfig + + +async def main() -> None: + try: + # Create sliding memory with size 3 + memory = SlidingMemory( + SlidingMemoryConfig( + size=3, + handlers={"removal_selector": lambda messages: messages[0]}, # Remove oldest message + ) + ) + + # Add messages + await memory.add(Message.of({"role": Role.SYSTEM, "text": "You are a helpful assistant."})) + + await memory.add(Message.of({"role": Role.USER, "text": "What is Python?"})) + + await memory.add(Message.of({"role": Role.ASSISTANT, "text": "Python is a programming language."})) + + # Adding a fourth message should trigger sliding window + await memory.add(Message.of({"role": Role.USER, "text": "What about JavaScript?"})) + + # Print results + print(f"Messages in memory: {len(memory.messages)}") # Should print 3 + for msg in memory.messages: + print(f"{msg.role}: {msg.text}") + + except Exception as e: + print(f"An error occurred: {e!s}") + import traceback + + print(traceback.format_exc()) + + +if __name__ == "__main__": + asyncio.run(main()) + +``` + +_Source: [examples/memory/slidingMemory.py](/examples/memory/slidingMemory.py)_ + +### TokenMemory + +Ensures that the token sum of all messages is below the given threshold. +If overflow occurs, the oldest message will be removed. + + + +```py +# SPDX-License-Identifier: Apache-2.0 + +import asyncio +import math + +from beeai import LLM + +from beeai_framework.backend import Message, Role +from beeai_framework.memory import TokenMemory + +# Initialize the LLM +llm = LLM() + +# Initialize TokenMemory with handlers +memory = TokenMemory( + llm=llm, + max_tokens=None, # Will be inferred from LLM + capacity_threshold=0.75, + sync_threshold=0.25, + handlers={ + "removal_selector": lambda messages: next((msg for msg in messages if msg.role != Role.SYSTEM), messages[0]), + "estimate": lambda msg: math.ceil((len(msg.role) + len(msg.text)) / 4), + }, +) + + +async def main() -> None: + try: + # Add system message + system_message = Message.of({"role": Role.SYSTEM, "text": "You are a helpful assistant."}) + await memory.add(system_message) + print(f"Added system message (hash: {hash(system_message)})") + + # Add user message + user_message = Message.of({"role": Role.USER, "text": "Hello world!"}) + await memory.add(user_message) + print(f"Added user message (hash: {hash(user_message)})") + + # Check initial memory state + print("\nInitial state:") + print(f"Is Dirty: {memory.is_dirty}") + print(f"Tokens Used: {memory.tokens_used}") + + # Sync token counts + await memory.sync() + print("\nAfter sync:") + print(f"Is Dirty: {memory.is_dirty}") + print(f"Tokens Used: {memory.tokens_used}") + + # Print all messages + print("\nMessages in memory:") + for msg in memory.messages: + print(f"{msg.role}: {msg.text} (hash: {hash(msg)})") + + except Exception as e: + print(f"An error occurred: {e!s}") + import traceback + + print(traceback.format_exc()) + + +if __name__ == "__main__": + asyncio.run(main()) + +``` + +_Source: [examples/memory/tokenMemory.py](/examples/memory/tokenMemory.py)_ + +### SummarizeMemory + +Only a single summarization of the conversation is preserved. Summarization is updated with every new message. 
+
+<!-- embedme examples/memory/summarizeMemory.py -->
+
+```py
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.backend.message import AssistantMessage, SystemMessage, UserMessage
+from beeai_framework.memory.summarize_memory import SummarizeMemory
+
+
+async def main() -> None:
+    try:
+        # Initialize the LLM with parameters
+        llm = await ChatModel.from_name(
+            "ollama:granite3.1-dense:8b",
+            # ChatModelParameters(temperature=0),
+        )
+
+        # Create summarize memory instance
+        memory = SummarizeMemory(llm)
+
+        # Add messages
+        await memory.add_many(
+            [
+                SystemMessage("You are a guide through France."),
+                UserMessage("What is the capital?"),
+                AssistantMessage("Paris"),
+                UserMessage("What language is spoken there?"),
+            ]
+        )
+
+        # Print results
+        print(f"Is Empty: {memory.is_empty()}")
+        print(f"Message Count: {len(memory.messages)}")
+
+        if memory.messages:
+            print(f"Summary: {memory.messages[0].get_texts()[0].get('text')}")
+
+    except Exception as e:
+        print(f"An error occurred: {e!s}")
+        import traceback
+
+        print(traceback.format_exc())
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+
+```
+
+_Source: [examples/memory/summarizeMemory.py](/examples/memory/summarizeMemory.py)_
+
+## Creating a custom memory provider
+
+To create your own memory implementation, you must implement the `BaseMemory` class.
+
+```py
+```
+
+_Source: /examples/memory/custom.py TODO
+
+The simplest implementation is `UnconstrainedMemory`, which can be found [here](/beeai/memory/unconstrained_memory.py).
diff --git a/python/docs/overview.md b/python/docs/overview.md
new file mode 100644
index 00000000..9aaa58f2
--- /dev/null
+++ b/python/docs/overview.md
@@ -0,0 +1,67 @@
+# Overview
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+## 📦 Modules
+
+The beeai directory (`beeai`) provides numerous modules that one can use.
+
+| Name | Description |
+| ------------------------------------------- | --------------------------------------------------------------------------------------------- |
+| [**agents**](./agents.md) | Base classes defining the common interface for agents. |
+| [**backend**](/docs/backend.md) | Functionalities that relate to AI models (chat, embedding, image, tool calling, ...) |
+| [**template**](./templates.md) | Prompt Templating system based on `Mustache` with various improvements. |
+| [**memory**](./memory.md) | Various types of memories to use with agents. |
+| [**tools**](./tools.md) | Tools that an agent can use. |
+| [**cache**](./cache.md) | Preset of different caching approaches that can be used together with tools. |
+| [**errors**](./errors.md) | Base framework error classes used by each module. |
+| [**logger**](./logger.md) | Core component for logging all actions within the framework. |
+| [**serializer**](./serialization.md) | Core component for the ability to serialize/deserialize modules into the serialized format. |
+| [**version**](./version.md) | Constants representing the framework (e.g., the latest version) |
+| [**emitter**](./emitter.md) | Bringing visibility to the system by emitting events. |
+| [**instrumentation**](./instrumentation.md) | Integrate monitoring tools into your application. |
+| **internals** | Modules used by other modules within the framework. |
+
+### Emitter
+
+Moved to a [standalone page](emitter.md).
+
+### Instrumentation
+
+Moved to a [standalone page](instrumentation.md).
+
+### LLMs
+
+Moved to a [standalone page](backend.md).
+
+### Templates
+
+Moved to a [standalone page](templates.md).
+
+### Agents
+
+Moved to a [standalone page](agents.md).
+
+### Memory
+
+Moved to a [standalone page](memory.md).
+
+### Tools
+
+Moved to a [standalone page](tools.md).
+
+### Cache
+
+Moved to a [standalone page](cache.md).
+
+### Errors
+
+Moved to a [standalone page](errors.md).
+
+### Logger
+
+Moved to a [standalone page](logger.md).
+
+### Serializer
+
+Moved to a [standalone page](serialization.md).
diff --git a/python/docs/searxng-tool.md b/python/docs/searxng-tool.md
new file mode 100644
index 00000000..bbb79591
--- /dev/null
+++ b/python/docs/searxng-tool.md
@@ -0,0 +1,5 @@
+# SearXNGTool
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+Not yet implemented in the Python version.
diff --git a/python/docs/serialization.md b/python/docs/serialization.md
new file mode 100644
index 00000000..a56c6b00
--- /dev/null
+++ b/python/docs/serialization.md
@@ -0,0 +1,69 @@
+# Serialization
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+> [!TIP]
+>
+> Location within the framework `beeai/serializer`.
+
+Serialization is a process of converting complex data structures or objects into a format that can be easily stored, transmitted, and reconstructed later.
+Serialization is a difficult task, and Python does not provide a magic tool to serialize and deserialize an arbitrary input. That is why we made one.
+
+```py
+```
+
+_Source: /examples/tools/base.py TODO
+
+> [!NOTE]
+>
+> The Serializer knows how to serialize/deserialize the most well-known Python data structures. Continue reading to see how to register your own.
+
+## Being Serializable
+
+Most parts of the framework implement the internal [`Serializable`](/beeai/internals/serializable.py) class, which exposes the following methods.
+
+- `createSnapshot` (returns an object that "snapshots" the current state)
+- `loadSnapshot` (applies the provided snapshot to the current instance)
+
+- `fromSerialized` (static, creates a new instance from the given serialized input)
+- `fromSnapshot` (static, creates a new instance from the given snapshot)
+
+See direct usage in the following memory example.
+
+```py
+```
+
+_Source: /examples/serialization/memory.py TODO
+
+### Serializing unknowns
+
+If you want to serialize a class that the `Serializer` does not know, it throws the `SerializerError` error.
+However, you can tell the `Serializer` how to work with your class by registering it as a serializable.
+
+```py
+```
+
+_Source: /examples/serialization/customExternal.py TODO
+
+or you can extend the `Serializable` class.
+
+```py
+```
+
+_Source: /examples/serialization/customInternal.py TODO
+
+> [!TIP]
+>
+> Most framework components are `Serializable`.
+
+### Context matters
+
+```py
+```
+
+_Source: /examples/serialization/context.py TODO
+
+> [!IMPORTANT]
+>
+> Ensuring that all classes are registered in advance can be annoying, but there's a good reason for that.
+> If we imported all the classes for you, that would significantly increase your application's size and bootstrapping time, and you would have to install all peer dependencies that you may not even need.
diff --git a/python/docs/sql-tool.md b/python/docs/sql-tool.md
new file mode 100644
index 00000000..86e9b934
--- /dev/null
+++ b/python/docs/sql-tool.md
@@ -0,0 +1,4 @@
+# 🛢️ SQLTool
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+Not yet implemented in Python.
diff --git a/python/docs/templates.md b/python/docs/templates.md
new file mode 100644
index 00000000..cfb81ff7
--- /dev/null
+++ b/python/docs/templates.md
@@ -0,0 +1,152 @@
+# Templates (Prompt Templates)
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+> [!TIP]
+>
+> Location within the framework `beeai/template`.
+
+A **template** is a predefined structure or format used to create consistent documents or outputs. It often includes placeholders for specific information that can be filled in later.
+
+A **prompt template**, on the other hand, is a specific type of template used in the context of language models or AI applications.
+It consists of a structured prompt that guides the model in generating a response or output. The prompt often includes variables or placeholders for user input, which helps to elicit more relevant or targeted responses.
+
+The framework exposes such functionality via the [`PromptTemplate TODO`]() class.
+
+> [!TIP]
+>
+> The Prompt Template concept is used everywhere, especially in our agents.
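+
+The Python snippets for the first few subsections below are still being ported. As a quick taste, here is a primitives-style sketch using the `PromptTemplate` class that the examples further down confirm (a pydantic `schema` plus Mustache-style `{{placeholders}}`); the `GreetingInput` model is invented for illustration:
+
+```py
+from pydantic import BaseModel
+
+from beeai_framework.utils.templates import PromptTemplate
+
+
+class GreetingInput(BaseModel):
+    name: str
+    count: int
+
+
+# Placeholders are resolved from a validated schema instance
+greeting_template = PromptTemplate(
+    schema=GreetingInput,
+    template="Hello {{name}}, you have {{count}} new messages.",
+)
+
+print(greeting_template.render(GreetingInput(name="Bee", count=3)))
+# -> Hello Bee, you have 3 new messages.
+```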
+
+## Usage
+
+### Primitives
+
+```py
+```
+
+_Source: /examples/templates/primitives.py TODO
+
+### Arrays
+
+```py
+```
+
+_Source: /examples/templates/arrays.py TODO
+
+### Objects
+
+```py
+```
+
+_Source: /examples/templates/objects.py TODO
+
+### Forking
+
+```py
+```
+
+_Source: /examples/templates/forking.py TODO
+
+### Functions
+
+```py
+```
+
+_Source: functions.py TODO
+
+### Agent Sys Prompt
+
+<!-- embedme examples/templates/agent_sys_prompt.py -->
+
+```py
+from beeai_framework.agents.runners.default.prompts import (
+    SystemPromptTemplate,
+    SystemPromptTemplateInput,
+    ToolDefinition,
+)
+from beeai_framework.tools.weather.openmeteo import OpenMeteoTool
+
+tool = OpenMeteoTool()
+
+# Render the granite system prompt
+prompt = SystemPromptTemplate.render(
+    SystemPromptTemplateInput(
+        instructions="You are a helpful AI assistant!", tools=[ToolDefinition(**tool.prompt_data())], tools_length=1
+    )
+)
+
+print(prompt)
+
+```
+
+_Source: [examples/templates/agent_sys_prompt.py](/examples/templates/agent_sys_prompt.py)_
+
+### Basic Functions
+
+<!-- embedme examples/templates/basic_functions.py -->
+
+```py
+import os
+from datetime import datetime
+from zoneinfo import ZoneInfo
+
+from pydantic import BaseModel
+
+from beeai_framework.utils.templates import PromptTemplate
+
+os.environ["USER"] = "BeeAI"
+
+
+class UserQuery(BaseModel):
+    query: str
+
+
+template = PromptTemplate(
+    schema=UserQuery,
+    functions={
+        "format_date": lambda: datetime.now(ZoneInfo("US/Eastern")).strftime("%A, %B %d, %Y at %I:%M:%S %p"),
+        "current_user": lambda: os.environ["USER"],
+    },
+    template="""
+{{format_date}}
+{{current_user}}: {{query}}
+""",
+)
+
+```
+
+_Source: [examples/templates/basic_functions.py](/examples/templates/basic_functions.py)_
+
+### Basic Template
+
+<!-- embedme examples/templates/basic_template.py -->
+
+```py
+from pydantic import BaseModel
+
+from beeai_framework.utils.templates import PromptTemplate
+
+
+class UserMessage(BaseModel):
+    label: str
+    input: str
+
+
+template = PromptTemplate(
+    schema=UserMessage,
+    template="""{{label}}: {{input}}""",
+)
+
+prompt = template.render(UserMessage(label="Query", input="What interesting things happened on this day in history?"))
+
+print(prompt)
+
+```
+
+_Source: [examples/templates/basic_template.py](/examples/templates/basic_template.py)_
+
+## Agents
+
+The Bee Agent internally uses multiple prompt templates, and because you now know how to work with them, you can alter the agent's behavior.
+
+The internal prompt templates can be modified [here](/examples/agents/bee_advanced.py).
diff --git a/python/docs/tools.md b/python/docs/tools.md
new file mode 100644
index 00000000..d0d4e13f
--- /dev/null
+++ b/python/docs/tools.md
@@ -0,0 +1,346 @@
+# Tools
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+> [!TIP]
+>
+> Location within the framework `beeai/tools`.
+
+Tools in the context of an agent refer to additional functionalities or capabilities integrated with the agent to perform specific tasks beyond text processing.
+
+These tools extend the agent's abilities, allowing it to interact with external systems, access information, and execute actions.
+
+## Built-in tools
+
+| Name | Description |
+| ------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ |
+| `PythonTool` | Run arbitrary Python code in the remote environment. |
+| `WikipediaTool` | Search for data on Wikipedia. |
+| `GoogleSearchTool` | Search for data on Google using Custom Search Engine. |
+| `DuckDuckGoTool` | Search for data on DuckDuckGo. |
+| [`SearXNGTool`](./searxng-tool.md) | Privacy-respecting, hackable metasearch engine. |
+| [`SQLTool`](./sql-tool.md) | Execute SQL queries against relational databases. |
+| `ElasticSearchTool` | Perform search or aggregation queries against an ElasticSearch database. |
+| `CustomTool` | Run your own Python function in the remote environment. |
+| `LLMTool` | Use an LLM to process input data. |
+| `DynamicTool` | A construct for creating dynamic tools. |
+| `ArXivTool` | Retrieve research articles published on arXiv. |
+| `WebCrawlerTool` | Retrieve content of an arbitrary website. |
+| `OpenMeteoTool` | Retrieve current, previous, or upcoming weather for a given destination. |
+| `MilvusDatabaseTool` | Perform retrieval queries (search, insert, delete, manage collections) against a Milvus database. |
+| `OpenAPITool` | Send requests to and receive responses from an API server. |
+| `MCPTool` | Discover and use tools exposed by an arbitrary [MCP Server](https://modelcontextprotocol.io/examples). |
+| ➕ [Request](https://github.com/i-am-bee/bee-py/discussions) | |
+
+All examples can be found [here](/examples/tools).
+
+> [!TIP]
+>
+> Would you like to use a tool from LangChain? See the [example](/examples/tools/langchain.py).
+
+## Usage
+
+### Basic
+
+```py
+```
+
+_Source: /examples/tools/base.py TODO
+
+### Advanced
+
+```py
+```
+
+_Source: /examples/tools/advanced.py TODO
+
+> [!TIP]
+>
+> To learn more about caching, refer to the [Cache documentation page](./cache.md).
+
+### Usage with agents
+
+```py
+```
+
+_Source: agent.py TODO
+
+### Usage with decorator
+
+<!-- embedme examples/tools/decorator.py -->
+
+```py
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+import json
+from urllib.parse import quote
+
+import requests
+from beeai import BeeAgent, tool
+
+from beeai_framework.agents.types import BeeInput, BeeRunInput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory
+from beeai_framework.tools.tool import StringToolOutput
+from beeai_framework.utils import BeeLogger
+
+logger = BeeLogger(__name__)
+
+
+# defining a tool using the `tool` decorator
+@tool
+def basic_calculator(expression: str) -> StringToolOutput:
+    """
+    A calculator tool that performs mathematical operations.
+
+    Args:
+        expression: The mathematical expression to evaluate (e.g., "2 + 3 * 4").
+
+    Returns:
+        The result of the mathematical expression
+    """
+    try:
+        encoded_expression = quote(expression)
+        math_url = f"https://newton.vercel.app/api/v2/simplify/{encoded_expression}"
+
+        response = requests.get(
+            math_url,
+            headers={"Accept": "application/json"},
+        )
+        response.raise_for_status()
+
+        return StringToolOutput(json.dumps(response.json()))
+    except Exception as e:
+        raise RuntimeError(f"Error evaluating expression: {e!s}") from e
+
+
+async def main() -> None:
+    # using the tool in an agent
+
+    chat_model = await ChatModel.from_name("ollama:granite3.1-dense:8b")
+
+    agent = BeeAgent(BeeInput(llm=chat_model, tools=[basic_calculator], memory=UnconstrainedMemory()))
+
+    result = await agent.run(BeeRunInput(prompt="What is the square root of 36?"))
+
+    print(result.result.text)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+
+```
+
+_Source: [examples/tools/decorator.py](/examples/tools/decorator.py)_
+
+### Usage with DuckDuckGo
+
+
+```py
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from beeai_framework.agents.bee import BeeAgent
+from beeai_framework.agents.types import BeeInput, BeeRunInput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.memory import UnconstrainedMemory
+from beeai_framework.tools.search.duckduckgo import DuckDuckGoSearchTool
+
+
+async def main() -> None:
+    chat_model = await ChatModel.from_name("ollama:granite3.1-dense:8b")
+    agent = BeeAgent(BeeInput(llm=chat_model, tools=[DuckDuckGoSearchTool()], memory=UnconstrainedMemory()))
+
+    result = await agent.run(BeeRunInput(prompt="How tall is Mount Everest?"))
+
+    print(result.result.text)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+
+```
+
+_Source: [examples/tools/duckduckgo.py](/examples/tools/duckduckgo.py)_
+
+### Usage with OpenMeteo
+
+
+```py
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from beeai_framework.agents.bee import BeeAgent
+from beeai_framework.agents.types import BeeInput, BeeRunInput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.memory import UnconstrainedMemory
+from beeai_framework.tools.weather.openmeteo import OpenMeteoTool
+
+
+async def main() -> None:
+    llm = await ChatModel.from_name("ollama:granite3.1-dense:8b")
+    agent = BeeAgent(BeeInput(llm=llm, tools=[OpenMeteoTool()], memory=UnconstrainedMemory()))
+
+    result = await agent.run(BeeRunInput(prompt="What's the current weather in Las Vegas?"))
+
+    print(result.result.text)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+
+```
+
+_Source: [examples/tools/openmeteo.py](/examples/tools/openmeteo.py)_
+
+## Writing a new tool
+
+To create a new tool, you have the following options:
+
+- Implement the base [`Tool TODO`]() class.
+- Instantiate the [`DynamicTool TODO`]() by passing your own handler (function) along with a `name`, a `description`, and an `input schema`.
+- Instantiate the [`CustomTool TODO`]() by passing your own Python function (code interpreter needed).
+
+### Implementing the `Tool` class
+
+The recommended and most sustainable way to create a tool is by implementing the base `Tool` class.
+
+#### Basic
+
+```py
+```
+
+_Source: /examples/tools/custom/base.py TODO_
+
+> [!TIP]
+>
+> `inputSchema` can be asynchronous.
+
+> [!TIP]
+>
+> If you want to return an array or a plain object, use `JSONToolOutput` or implement your own.
+
+#### Advanced
+
+If your tool is more complex, you may want to use the full power of the tool abstraction, as the following example shows.
+
+```py
+```
+
+_Source: /examples/tools/custom/openLibrary.py TODO_
+
+#### Implementation Notes
+
+- **Implement the `Tool` class:**
+
+  - `MyNewToolOutput` is required and must be an implementation of `ToolOutput`, such as `StringToolOutput` or `JSONToolOutput`.
+
+  - `ToolOptions` is optional (defaults to `BaseToolOptions`); these are constructor parameters that are passed during tool creation.
+
+  - `ToolRunOptions` is optional (defaults to `BaseToolRunOptions`); these are optional parameters that are passed to the run method.
+
+- **Be given a unique name:**
+
+  Note: Convention and best practice is to set the tool's name to the name of its class.
+
+  ```py
+  name = "MyNewTool"
+  ```
+
+- **Provide a natural language description of what the tool does:**
+
+  ❗ Important: the agent uses this description to determine when the tool should be used. It's probably the most important aspect of your tool, and you should experiment with different natural language descriptions to ensure the tool is used in the correct circumstances. You can also include usage tips and guidance for the agent in the description, but
+  it's advisable to keep the description succinct in order to reduce the probability of conflicting with other tools, or adversely affecting agent behavior.
+
+  ```py
+  description = "Takes X action when given Y input resulting in Z output"
+  ```
+
+- **Declare an input schema:**
+
+  This is used to define the format of the input to your tool. The agent will formalise the natural language input(s) it has received and structure them into the fields described in the tool's input. The input schema can be specified using [Zod](https://github.com/colinhacks/zod) (recommended) or JSONSchema. It must be a function (either sync or async). Zod effects (e.g. `z.object().transform(...)`) are not supported. The return value of `inputSchema` must always be an object and pass validation by the `validateSchema()` function defined in [schema.py TODO](). Keep your tool input schema simple and provide schema descriptions to help the agent to interpret fields.

+  ```txt
+  Coming soon
+  ```
+
+- **Implement initialisation:**
+
+  The unnamed static block is executed when your tool is called for the first time. It is used to register your tool as `serializable` (you can then use the `serialize()` method).
+
+
+  ```txt
+  Coming soon
+  ```
+
+- **Implement the `_run()` method:**
+
+
+  ```txt
+  Coming soon
+  ```
+
+### Using the `DynamicTool` class
+
+The `DynamicTool` allows you to create a tool without extending the base tool class.
+
+```py
+```
+
+_Source: /examples/tools/custom/dynamic.py TODO_
+
+The `name` of the tool is required and must only contain the characters a-z, A-Z, 0-9, hyphen (-), or underscore (\_).
+The `inputSchema` and `description` are also both required.
+
+### Using the `CustomTool` (Python functions)
+
+If you want to run your own Python function, use the [`CustomTool`](/beeai/tools/custom.py).
+
+```py
+```
+
+_Source: /examples/tools/custom/python.py TODO_
+
+> [!TIP]
+>
+> Environmental variables can be overridden (or defined) in the following ways:
+>
+> 1. During the creation of a `CustomTool`, either via the constructor or the factory function (`CustomTool.fromSourceCode`).
+> 2. By passing them directly as part of the options when invoking: `myTool.run({ ... }, { env: { MY_ENV: 'MY_VALUE' } })`.
+> 3. Dynamically during execution via [`Emitter`](/docs/emitter.md): `myTool.emitter.on("start", ({ options }) => { options.env.MY_ENV = 'MY_VALUE'; })`.
+
+> [!IMPORTANT]
+>
+> Custom tools are executed within the code interpreter, but they cannot access any files;
+> only `PythonTool` can.
+
+### Using the `MCPTool` class
+
+The `MCPTool` allows you to instantiate tools given a connection to an [MCP server](https://modelcontextprotocol.io/examples) with the tools capability.
+
+```py
+```
+
+_Source: /examples/tools/mcp.py TODO_
+
+## General Tips
+
+### Data Minimization
+
+If your tool is providing data to the agent, try to ensure that the data is relevant and free of extraneous metadata. Preprocessing data to improve relevance and minimize unnecessary data conserves agent memory, improving overall performance.
+
+### Provide Hints
+
+If your tool encounters an error that is fixable, you can return a hint to the agent; the agent will try to reuse the tool in the context of the hint. This can improve the agent's ability
+to recover from errors.
+
+### Security & Stability
+
+When building tools, consider that the tool is being invoked by a somewhat unpredictable third party (the agent). You should ensure that sufficient guardrails are in place to prevent
+adverse outcomes.
diff --git a/python/docs/tutorials.md b/python/docs/tutorials.md
new file mode 100644
index 00000000..6689ddc2
--- /dev/null
+++ b/python/docs/tutorials.md
@@ -0,0 +1,3 @@
+# BeeAI Framework Tutorials
+
+Coming soon
diff --git a/python/docs/version.md b/python/docs/version.md
new file mode 100644
index 00000000..eb1496af
--- /dev/null
+++ b/python/docs/version.md
@@ -0,0 +1,20 @@
+# Version
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+> [!TIP]
+>
+> Location within the framework: `beeai/version`.
+
+```py
+```
+
+_Source: /examples/version.py TODO_
+
+> [!NOTE]
+>
+> If you develop the framework locally, the version will always be `0.0.0`.
+
+> [!NOTE]
+>
+> The framework's serializer attaches the framework's version to its metadata. Read more about [Serialization](./serialization.md).
diff --git a/python/docs/workflows.md b/python/docs/workflows.md
new file mode 100644
index 00000000..8c86564b
--- /dev/null
+++ b/python/docs/workflows.md
@@ -0,0 +1,372 @@
+# Workflows (experimental)
+
+*Disclaimer: The notes below may refer to the TypeScript version or missing files as the Python version moves toward parity in the near future. Additional Python examples coming soon. TODO*
+
+> [!TIP]
+>
+> Location within the framework: `beeai/workflows`.
+
+Workflows provide a flexible and extensible component for managing and executing structured sequences of tasks.
+
+- Dynamic Execution: Steps can direct the flow based on state or results (see the sketch below).
+- Validation: Define schemas for data consistency and type safety.
+- Modularity: Steps can be standalone or invoke nested workflows.
+- Observability: Emit events during execution to track progress or handle errors.
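+
+Concretely, a workflow pairs a pydantic state model with named step functions: each step receives the state, may mutate it, and returns the name of the step to run next, or a reserved name such as `Workflow.SELF` (repeat the current step) or `Workflow.END` (finish). A minimal sketch (the `State` model and `increment` step are illustrative; the API mirrors the examples below):
+
+```py
+import asyncio
+
+from pydantic import BaseModel
+
+from beeai_framework.workflows.workflow import Workflow
+
+
+class State(BaseModel):
+    counter: int = 0
+
+
+def increment(state: State) -> str:
+    state.counter += 1
+    # Repeat this step until the counter reaches 3, then finish
+    return Workflow.SELF if state.counter < 3 else Workflow.END
+
+
+async def main() -> None:
+    workflow = Workflow(State)
+    workflow.add_step("increment", increment)
+    response = await workflow.run(State())
+    print(response.state.counter)  # 3
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```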
+
+## Usage
+
+### Basic
+
+```py
+```
+
+_Source: /examples/workflows/basic.py TODO_
+
+### Simple
+
+
+```py
+import asyncio
+import traceback
+
+from pydantic import BaseModel, ValidationError
+
+from beeai_framework.workflows.workflow import Workflow, WorkflowError
+
+
+async def main() -> None:
+    # State
+    class State(BaseModel):
+        input: str
+
+    try:
+        workflow = Workflow(State)
+        workflow.add_step("first", lambda state: print("Running first step!"))
+        workflow.add_step("second", lambda state: print("Running second step!"))
+        workflow.add_step("third", lambda state: print("Running third step!"))
+
+        await workflow.run(State(input="Hello"))
+
+    except WorkflowError:
+        traceback.print_exc()
+    except ValidationError:
+        traceback.print_exc()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+
+```
+
+_Source: [examples/workflows/simple.py](/examples/workflows/simple.py)_
+
+### Advanced
+
+
+```py
+import asyncio
+from typing import Literal, TypeAlias
+
+from pydantic import BaseModel, ValidationError
+
+from beeai_framework.workflows.workflow import Workflow, WorkflowError, WorkflowReservedStepName
+
+
+async def main() -> None:
+    # State
+    class State(BaseModel):
+        x: int
+        y: int
+        abs_repetitions: int | None = None
+        result: int | None = None
+
+    WorkflowStep: TypeAlias = Literal["pre_process", "add_loop", "post_process"]
+
+    def pre_process(state: State) -> WorkflowStep:
+        print("pre_process")
+        state.abs_repetitions = abs(state.y)
+        return "add_loop"
+
+    def add_loop(state: State) -> WorkflowStep | WorkflowReservedStepName:
+        if state.abs_repetitions and state.abs_repetitions > 0:
+            result = (state.result if state.result is not None else 0) + state.x
+            abs_repetitions = (state.abs_repetitions if state.abs_repetitions is not None else 0) - 1
+            print(f"add_loop: intermediate result {result}")
+            state.abs_repetitions = abs_repetitions
+            state.result = result
+            return Workflow.SELF
+        else:
+            return "post_process"
+
+    def post_process(state: State) -> WorkflowReservedStepName:
+        print("post_process")
+        if state.y < 0:
+            result = -(state.result if state.result is not None else 0)
+            state.result = result
+        return Workflow.END
+
+    try:
+        multiplication_workflow = Workflow[State, WorkflowStep](name="MultiplicationWorkflow", schema=State)
+        multiplication_workflow.add_step("pre_process", pre_process)
+        multiplication_workflow.add_step("add_loop", add_loop)
+        multiplication_workflow.add_step("post_process", post_process)
+
+        response = await multiplication_workflow.run(State(x=8, y=5))
+        print(f"result: {response.state.result}")
+
+        response = await multiplication_workflow.run(State(x=8, y=-5))
+        print(f"result: {response.state.result}")
+
+    except WorkflowError as e:
+        print(e)
+    except ValidationError as e:
+        print(e)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+
+```
+
+_Source: [examples/workflows/advanced.py](/examples/workflows/advanced.py)_
+
+### Nesting
+
+```py
+```
+
+_Source: /examples/workflows/nesting.py TODO_
+
+### Agent Delegation
+
+```py
+```
+
+_Source: /examples/workflows/agent.py TODO_
+
+### Memory
+
+
+```py
+import asyncio
+import traceback
+
+from pydantic import BaseModel, InstanceOf, ValidationError
+
+from beeai_framework.backend.message import AssistantMessage, UserMessage
+from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory
+from beeai_framework.workflows.workflow import Workflow, WorkflowError
+
+
+async def main() -> None:
+    # State with memory
+    class State(BaseModel):
+        memory: InstanceOf[UnconstrainedMemory]
+        output: str | None = None
+
+    async def echo(state: State) -> str:
+        # Get the last message in memory
+        last_message = state.memory.messages[-1]
+        state.output = last_message.text[::-1]
+        return Workflow.END
+
+    try:
+        memory = UnconstrainedMemory()
+        workflow = Workflow(State)
+        workflow.add_step("echo", echo)
+
+        while True:
+            # Add user message to memory
+            await memory.add(UserMessage(content=input("User: ")))
+            # Run workflow with memory
+            response = await workflow.run(State(memory=memory))
+            # Add assistant response to memory
+            await memory.add(AssistantMessage(content=response.state.output))
+
+            print("Assistant: ", response.state.output)
+    except WorkflowError:
+        traceback.print_exc()
+    except ValidationError:
+        traceback.print_exc()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+
+```
+
+_Source: [examples/workflows/memory.py](/examples/workflows/memory.py)_
+
+### Web Agent
+
+
+```py
+import asyncio
+import sys
+import traceback
+
+from langchain_community.utilities import SearxSearchWrapper
+from pydantic import BaseModel, Field, ValidationError
+
+from beeai_framework.adapters.ollama.backend.chat import OllamaChatModel
+from beeai_framework.backend.chat import ChatModelOutput, ChatModelStructureOutput
+from beeai_framework.backend.message import UserMessage
+from beeai_framework.utils.templates import PromptTemplate
+from beeai_framework.workflows.workflow import Workflow, WorkflowError
+
+
+async def main() -> None:
+    llm = OllamaChatModel("granite3.1-dense:8b")
+    search = SearxSearchWrapper(searx_host="http://127.0.0.1:8888")
+
+    class State(BaseModel):
+        input: str
+        search_results: str | None = None
+        output: str | None = None
+
+    class InputSchema(BaseModel):
+        input: str
+
+    class WebSearchQuery(BaseModel):
+        search_query: str = Field(description="Search query.")
+
+    class RAGSchema(InputSchema):
+        input: str
+        search_results: str
+
+    async def web_search(state: State) -> str:
+        print("Step: ", sys._getframe().f_code.co_name)
+        prompt = PromptTemplate(
+            schema=InputSchema,
+            template="""
+            Please create a web search query for the following input.
+
+            Query: {{input}}""",
+        ).render(InputSchema(input=state.input))
+
+        output: ChatModelStructureOutput = await llm.create_structure(
+            {
+                "schema": WebSearchQuery,
+                "messages": [UserMessage(prompt)],
+            }
+        )
+        # TODO Why is object not of type schema T?
+        state.search_results = search.run(output.object["search_query"])
+        return Workflow.NEXT
+
+    async def generate_output(state: State) -> str:
+        print("Step: ", sys._getframe().f_code.co_name)
+
+        prompt = PromptTemplate(
+            schema=RAGSchema,
+            template="""
+            Use the following search results to answer the query accurately. If the results are irrelevant or insufficient, say 'I don't know.'
+
+            Search Results:
+            {{search_results}}
+
+            Query: {{input}}
+            """,  # noqa: E501
+        ).render(RAGSchema(input=state.input, search_results=state.search_results or "No results available."))
+
+        output: ChatModelOutput = await llm.create({"messages": [UserMessage(prompt)]})
+        state.output = output.get_text_content()
+        return Workflow.END
+
+    try:
+        # Define the structure of the workflow graph
+        workflow = Workflow(State)
+        workflow.add_step("web_search", web_search)
+        workflow.add_step("generate_output", generate_output)
+
+        # Execute the workflow
+        result = await workflow.run(State(input="What is the demon core?"))
+
+        print("\n*********************")
+        print("Input: ", result.state.input)
+        print("Agent: ", result.state.output)
+
+    except WorkflowError:
+        traceback.print_exc()
+    except ValidationError:
+        traceback.print_exc()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+
+```
+
+_Source: [examples/workflows/web_agent.py](/examples/workflows/web_agent.py)_
+
+### Multi-agent Content Creator
+
+```py
+```
+
+_Source: /examples/workflows/contentCreator.py TODO_
+
+### Multi-agent Workflows
+
+```py
+```
+
+_Source: /examples/workflows/multiAgents.py TODO_
diff --git a/python/examples/README.md b/python/examples/README.md
new file mode 100644
index 00000000..70622343
--- /dev/null
+++ b/python/examples/README.md
@@ -0,0 +1,145 @@
+# BeeAI Framework Examples
+
+This repository contains examples demonstrating the usage of the BeeAI Framework, a toolkit for building AI agents and applications.
+
+## Table of Contents
+
+1. [Agents](#agents)
+2. [Workflows](#workflows)
+3. [Cache](#cache)
+4. [Errors](#errors)
+5. [Helpers](#helpers)
+6. [LLMs (Language Models)](#llms-language-models)
+7. [Logger](#logger)
+8. [Memory](#memory)
+9. [Serialization](#serialization)
+10. [Templates](#templates)
+11. [Tools](#tools)
+
+## Agents
+
+- [`bee.py`](/examples/agents/bee.py): Basic Bee Agent implementation
+- [`bee_advanced.py`](/examples/agents/bee_advanced.py): Advanced Bee Agent with custom configurations
+- [`bee_reusable.py`](/examples/agents/bee_reusable.py): Demonstration of serializing and reusing Bee Agents
+- [`custom_agent.py`](/examples/agents/custom_agent.py): Example of creating a custom agent
+- [`granite_bee.py`](/examples/agents/granite/granite_bee.py): Basic Bee Agent using an IBM Granite LLM
+- [`granite_wiki_bee.py`](/examples/agents/granite/granite_wiki_bee.py): Advanced Bee Agent using an IBM Granite LLM with Wikipedia retrieval
+- [`simple.py`](/examples/agents/simple.py): Simple agent implementation
+- [`sql.py`](/examples/agents/sql.py): Agent for SQL-related tasks
+
+## Workflows
+
+- [`simple.py`](/examples/workflows/simple.py): Introduction to workflows
+- [`nesting.py`](/examples/workflows/nesting.py): How to nest workflows
+- [`agent.py`](/examples/workflows/agent.py): Using workflows to interconnect two agents with a critique step
+- [`multiAgents.py`](/examples/workflows/multiAgents.py): Multi-step sequential agentic workflow
+- [`contentCreator.py`](/examples/workflows/contentCreator.py): Multi-step workflow for writing blog posts
+
+## Cache
+
+- [`cacheFn.py`](/examples/cache/cacheFn.py): Function caching example
+- [`custom.py`](/examples/cache/custom.py): Custom cache implementation
+- [`decoratorCache.py`](/examples/cache/decoratorCache.py): Cache decorator usage
+- [`decoratorCacheComplex.py`](/examples/cache/decoratorCacheComplex.py): Complex cache decorator example
+- [`fileCache.py`](/examples/cache/fileCache.py): File-based caching
+- [`fileCacheCustomProvider.py`](/examples/cache/fileCacheCustomProvider.py): Custom provider for file cache
+- [`slidingCache.py`](/examples/cache/slidingCache.py): Sliding window cache implementation
+- [`toolCache.py`](/examples/cache/toolCache.py): Caching for tools
+- [`unconstrainedCache.py`](/examples/cache/unconstrainedCache.py): Unconstrained cache example
+- [`unconstrainedCacheFunction.py`](/examples/cache/unconstrainedCacheFunction.py): Function using unconstrained cache
+
+## Errors
+
+- [`base.py`](/examples/errors/base.py): Basic error handling
+- [`cast.py`](/examples/errors/cast.py): Error casting example
+- [`tool.py`](/examples/errors/tool.py): Tool-specific error handling
+
+## Helpers
+
+- [`io.py`](/examples/helpers/io.py): Input/Output helpers
+- [`setup.py`](/examples/helpers/setup.py): Setup utilities
+
+## LLMs (Language Models)
+
+- [`chat.py`](/examples/backend/chat.py): Chat-based language model usage
+- [`chatCallback.py`](/examples/backend/chatStream.py): Callbacks for chat models
+- [`structured.py`](/examples/backend/structured.py): Structured output from language models
+
+### LLM Providers
+
+- [`ollama.py`](/examples/backend/providers/ollama.py): Ollama model usage
+- [`watsonx.py`](/examples/backend/providers/watsonx.py): Watsonx integration
+
+## Logger
+
+- [`agent.py`](/examples/logger/agent.py): Agent-specific logging
+- [`base.py`](/examples/logger/base.py): Basic logging setup
+- [`pino.py`](/examples/logger/pino.py): Pino logger integration
+
+## Memory
+
+- [`agentMemory.py`](/examples/memory/agentMemory.py): Memory management for agents
+- [`custom.py`](/examples/memory/custom.py): Custom memory implementation
+- [`llmMemory.py`](/examples/memory/llmMemory.py): Memory for language models
+- [`slidingMemory.py`](/examples/memory/slidingMemory.py): Sliding window memory
+- [`summarizeMemory.py`](/examples/memory/summarizeMemory.py): Memory with summarization
+- [`tokenMemory.py`](/examples/memory/tokenMemory.py): Token-based memory
+- [`unconstrainedMemory.py`](/examples/memory/unconstrainedMemory.py): Unconstrained memory example
+
+## Serialization
+
+- [`base.py`](/examples/serialization/base.py): Basic serialization
+- [`context.py`](/examples/serialization/context.py): Context serialization
+- [`customExternal.py`](/examples/serialization/customExternal.py): Custom external serialization
+- [`customInternal.py`](/examples/serialization/customInternal.py): Custom internal serialization
+- [`memory.py`](/examples/serialization/memory.py): Memory serialization
+
+## Templates
+
+- [`arrays.py`](/examples/templates/arrays.py): Array-based templates
+- [`forking.py`](/examples/templates/forking.py): Template forking
+- [`functions.py`](/examples/templates/functions.py): Function-based templates
+- [`objects.py`](/examples/templates/objects.py): Object-based templates
+- [`primitives.py`](/examples/templates/primitives.py): Primitive data type templates
+
+## Tools
+
+- [`advanced.py`](/examples/tools/advanced.py): Advanced tool usage
+- [`agent.py`](/examples/tools/agent.py): Agent-specific tools
+- [`base.py`](/examples/tools/base.py): Basic tool implementation
+- [`mcp.py`](/examples/tools/mcp.py): MCP tool usage
+
+### Custom Tools
+
+- [`base.py`](/examples/tools/custom/base.py): Custom tool base implementation
+- [`dynamic.py`](/examples/tools/custom/dynamic.py): Dynamic tool creation
+- [`openLibrary.py`](/examples/tools/custom/openLibrary.py): OpenLibrary API tool
+- [`python.py`](/examples/tools/custom/python.py): Python-based custom tool
+
+- [`langchain.py`](/examples/tools/langchain.py): LangChain tool integration
+
+## Usage
+
+To run these examples, make sure you have the BeeAI Framework cloned and properly configured. Each file demonstrates a specific feature or use case of the framework. You can run individual examples using Python.
+
+1. Clone the repository:
+   ```bash
+   git clone git@github.com:i-am-bee/beeai-framework
+   ```
+2. Install dependencies:
+   ```bash
+   pip install .
+   ```
+3. Create a `.env` file (from `.env.example`) and fill in any missing values.
+
+4. To run an arbitrary example, use the following command:
+
+   ```bash
+   python examples/path/to/example.py
+   ```
+
+For more detailed information on the BeeAI Framework, please refer to the [documentation](/docs/README.md).
+
+> [!TIP]
+>
+> To run examples that use Ollama, be sure that you have installed [Ollama](https://ollama.com) with the [llama3.1](https://ollama.com/library/llama3.1) model downloaded.
diff --git a/python/examples/agents/README.md b/python/examples/agents/README.md
new file mode 100644
index 00000000..133c29c0
--- /dev/null
+++ b/python/examples/agents/README.md
@@ -0,0 +1,7 @@
+# Examples of how to use the BeeAI Framework for Python
+
+Be sure to install all the necessary dependencies before running these examples.
+
+```bash
+pip install -r examples/agents/requirements.txt
+```
diff --git a/python/examples/agents/bee.py b/python/examples/agents/bee.py
new file mode 100644
index 00000000..af76dc32
--- /dev/null
+++ b/python/examples/agents/bee.py
@@ -0,0 +1,162 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+import json
+import logging
+import os
+from typing import Any
+
+from dotenv import load_dotenv
+
+# Import LangChain's Wikipedia tool from community package
+from langchain_community.tools import WikipediaQueryRun
+from langchain_community.utilities import WikipediaAPIWrapper
+from pydantic import BaseModel, Field
+
+from beeai_framework.agents.bee.agent import BeeAgent
+from beeai_framework.agents.types import BeeInput, BeeRunInput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.emitter import EventMeta
+from beeai_framework.emitter.emitter import Emitter
+from beeai_framework.emitter.types import EmitterOptions
+from beeai_framework.memory.token_memory import TokenMemory
+from beeai_framework.tools.tool import StringToolOutput, Tool
+from beeai_framework.utils.custom_logger import BeeLogger
+
+# Load environment variables
+load_dotenv()
+
+# Configure logging - using DEBUG instead of trace
+logger = BeeLogger("app", level=logging.DEBUG)
+
+
+def get_env_var(key: str, default: str | None = None) -> str:
+    """Helper function to get environment variables with defaults"""
+    return os.getenv(key, default)
+
+
+class LangChainWikipediaToolInput(BaseModel):
+    query: str = Field(description="The topic or question to search for on Wikipedia.")
+
+
+class LangChainWikipediaTool(Tool):
+    """Adapter class to integrate LangChain's Wikipedia tool with our framework"""
+
+    name = "Wikipedia"
+    description = "Search factual and historical information from Wikipedia about given topics."
+
+    input_schema = LangChainWikipediaToolInput
+
+    def __init__(self) -> None:
+        super().__init__()
+        wikipedia = WikipediaAPIWrapper()
+        self.wikipedia = WikipediaQueryRun(api_wrapper=wikipedia)
+
+    def _run(self, input: LangChainWikipediaToolInput, _: Any | None = None) -> StringToolOutput:
+        query = input.query
+        try:
+            result = self.wikipedia.run(query)
+            return StringToolOutput(json.dumps(result))
+        except Exception as e:
+            logger.error(f"Wikipedia search error: {e!s}")
+            return StringToolOutput(f"Error searching Wikipedia: {e!s}")
+
+
+async def create_agent() -> BeeAgent:
+    """Create and configure the agent with tools and LLM"""
+
+    llm = await ChatModel.from_name(
+        "ollama:granite3.1-dense:8b",
+        # ChatModelParameters(temperature=0, presence_penalty=1.0),
+    )
+
+    # Configure tools with LangChain's Wikipedia tool
+    # tools = [LangChainWikipediaTool(), OpenMeteoTool()]
+    tools = [LangChainWikipediaTool()]
+
+    # Add code interpreter tool if URL is configured
+    code_interpreter_url = get_env_var("CODE_INTERPRETER_URL")
+    if code_interpreter_url:
+        # Note: Python tool implementation would go here
+        pass
+
+    # Create agent with memory and tools
+    agent = BeeAgent(BeeInput(llm=llm, tools=tools, memory=TokenMemory(llm)))
+
+    return agent
+
+
+async def process_agent_events(event_data: dict[str, Any] | None, event_meta: EventMeta) -> None:
+    """Process agent events and log appropriately"""
+
+    if event_meta.name == "start":
+        logger.info("Agent started")
+    elif event_meta.name == "error":
+        logger.info(f"Agent error: {event_data['error']}")
+    elif event_meta.name == "retry":
+        logger.info("Agent: retrying the action...")
+    elif event_meta.name == "update":
+        update = event_data["update"]
+        logger.info(f"Agent ({update.get('key')}): {update.get('value')}")
+    elif event_meta.name == "finish":
+        logger.info("Agent finished")
+
+
+async def observer(emitter: Emitter) -> None:
+    emitter.on("*.*", process_agent_events, EmitterOptions(match_nested=True))
+
+
+async def main() -> None:
+    """Main application loop"""
+
+    try:
+        # Create agent
+        agent = await create_agent()
+
+        # Log code interpreter status if configured
+        code_interpreter_url = get_env_var("CODE_INTERPRETER_URL")
+        if code_interpreter_url:
+            logger.info(
+                f"🛠️ System: The code interpreter tool is enabled. Please ensure that it is running on {code_interpreter_url}"  # noqa: E501
+            )
+
+        logger.info("Agent initialized with LangChain Wikipedia tool. Type 'exit' or 'quit' to end.")
+
+        # Main interaction loop
+        while True:
+            try:
+                # Get user input
+                prompt = input("\nUser: ").strip()
+                if not prompt:
+                    continue
+
+                if prompt.lower() in ["exit", "quit"]:
+                    break
+
+                # Run agent with the prompt
+                result = await agent.run(
+                    BeeRunInput(
+                        prompt=prompt,
+                        options={
+                            "execution": {
+                                "max_retries_per_step": 3,
+                                "total_max_retries": 10,
+                                "max_iterations": 20,
+                            }
+                        },
+                    )
+                ).observe(observer)
+
+                print(f"Received response: {result.result.text}")
+
+            except KeyboardInterrupt:
+                logger.info("\nExiting...")
+                break
+            except Exception as e:
+                logger.error(f"Error processing prompt: {e!s}")
+
+    except Exception as e:
+        logger.error(f"Application error: {e!s}")
+
+
+if __name__ == "__main__":
+    # Run the async main function
+    asyncio.run(main())
diff --git a/python/examples/agents/bee_advanced.py b/python/examples/agents/bee_advanced.py
new file mode 100644
index 00000000..67fef6b4
--- /dev/null
+++ b/python/examples/agents/bee_advanced.py
@@ -0,0 +1,195 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+import json
+import logging
+import signal
+from typing import Any
+
+from dotenv import load_dotenv
+from pydantic import BaseModel, Field
+
+from beeai_framework.agents import BeeAgent
+from beeai_framework.llms import LLM
+from beeai_framework.memory import UnconstrainedMemory
+from beeai_framework.tools import OpenMeteoTool, Tool
+from beeai_framework.utils import BeeEventEmitter, BeeLogger, MessageEvent
+
+# Load environment variables
+load_dotenv()
+
+# Configure logging
+logger = BeeLogger("app", level=logging.DEBUG)
+event_emitter = BeeEventEmitter()
+
+
+class DuckDuckGoSearchType:
+    STRICT = "STRICT"
+    MODERATE = "MODERATE"
+    OFF = "OFF"
+
+
+class DuckDuckGoSearchToolInput(BaseModel):
+    query: str = Field(description="The search query.")
+
+
+class DuckDuckGoSearchTool(Tool):
+    """DuckDuckGo search tool implementation"""
+
+    name = "DuckDuckGoSearch"
+    description = "Search for information on the web using DuckDuckGo"
+    input_schema = DuckDuckGoSearchToolInput
+
+    def __init__(self, max_results: int = 10, safe_search: str = DuckDuckGoSearchType.STRICT) -> None:
+        super().__init__()
+        self.max_results = max_results
+        self.safe_search = safe_search
+
+    def _run(self, input: DuckDuckGoSearchToolInput, _: Any | None = None) -> dict[str, Any] | str:
+        try:
+            # Ensure input is properly formatted: parse a raw JSON string into the input model
+            if isinstance(input, str):
+                input = DuckDuckGoSearchToolInput(**json.loads(input))
+
+            if not input.query:
+                return "Error: No search query provided"
+
+            # Here you would implement the actual DuckDuckGo search
+            # For now, return a mock response
+            return {
+                "results": [
+                    {
+                        "title": f"Search result for: {input.query}",
+                        "link": "https://example.com",
+                        "snippet": f"This is a mock search result for the query: {input.query}",
+                    }
+                ],
+                "total": 1,
+            }
+        except json.JSONDecodeError as e:
+            logger.error(f"JSON parsing error: {e!s}")
+            return f"Error parsing search input: {e!s}"
+        except Exception as e:
+            logger.error(f"Search error: {e!s}")
+            return f"Error performing search: {e!s}"
+
+
+def create_agent() -> BeeAgent:
+    """Create and configure the agent with custom tools and prompts"""
+
+    # Initialize LLM
+    llm = LLM(
+        model="llama3.1",
+        parameters={
+            "temperature": 0,
+            "repeat_penalty": 1.0,
+            "num_predict": 2048,
+        },
+    )
+
+    # Configure tools
+    tools = [
+        DuckDuckGoSearchTool(max_results=10, safe_search=DuckDuckGoSearchType.STRICT),
+        OpenMeteoTool(),
+    ]
+
+    # Create agent with custom configuration
+    agent = BeeAgent(llm=llm, tools=tools, memory=UnconstrainedMemory())
+
+    return agent
+
+
+async def handle_tool_response(response: Any, tool_name: str) -> str:
+    """Handle tool response and emit appropriate events"""
+    try:
+        if isinstance(response, dict | list):
+            response_str = json.dumps(response, ensure_ascii=False, indent=2)
+        else:
+            response_str = str(response)
+
+        event_emitter.emit(MessageEvent(source="Agent", message=response_str, state=f"tool_response_{tool_name}"))
+
+        return response_str
+    except Exception as e:
+        logger.error(f"Error handling tool response: {e!s}")
+        event_emitter.emit(MessageEvent(source="Agent", message=str(e), state="error"))
+        return str(e)
+
+
+async def run_agent() -> None:
+    """Main function to run the agent"""
+
+    try:
+        # Create agent
+        agent = create_agent()
+        print("Agent initialized with custom tools and prompts. Type 'exit' or 'quit' to end.")
+
+        # Main interaction loop
+        while True:
+            try:
+                # Get user input
+                prompt = input("\nUser: ").strip()
+                if not prompt:
+                    continue
+
+                if prompt.lower() in ["exit", "quit"]:
+                    break
+
+                # Emit user message event
+                # event_emitter.emit(MessageEvent(source="User", message=prompt))
+
+                # Run agent with timeout
+                try:
+                    # Set timeout signal
+                    signal.alarm(120)  # 2 minutes timeout
+
+                    result = await agent.run(
+                        prompt=prompt,
+                        options={
+                            "execution": {
+                                "max_retries_per_step": 3,
+                                "total_max_retries": 10,
+                                "max_iterations": 20,
+                            }
+                        },
+                    )
+
+                    # Handle final response
+                    if result:
+                        event_emitter.emit(
+                            MessageEvent(
+                                source="Agent",
+                                message=str(result),
+                                state="final_answer",
+                            )
+                        )
+
+                finally:
+                    # Clear timeout
+                    signal.alarm(0)
+
+            except KeyboardInterrupt:
+                print("\nExiting...")
+                break
+            except json.JSONDecodeError as e:
+                logger.error(f"JSON parsing error: {e!s}")
+                event_emitter.emit(
+                    MessageEvent(
+                        source="Agent",
+                        message=f"Error parsing JSON: {e!s}",
+                        state="error",
+                    )
+                )
+            except Exception as e:
+                logger.error(f"Error processing prompt: {e!s}")
+                event_emitter.emit(MessageEvent(source="Agent", message=str(e), state="error"))
+
+    except Exception as e:
+        logger.error(f"Application error: {e!s}")
+        event_emitter.emit(MessageEvent(source="Agent", message=str(e), state="error"))
+
+
+if __name__ == "__main__":
+    # Run the async main function
+    # logging.basicConfig(level=logging.DEBUG)
+    asyncio.run(run_agent())
diff --git a/python/examples/agents/granite.py b/python/examples/agents/granite.py
new file mode 100644
index 00000000..2bd94446
--- /dev/null
+++ b/python/examples/agents/granite.py
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from beeai_framework.agents.bee.agent import BeeAgent
+from beeai_framework.agents.types import BeeInput, BeeRunOutput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.emitter import Emitter, EventMeta
+from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory
+from beeai_framework.tools.search import DuckDuckGoSearchTool
+from beeai_framework.tools.weather.openmeteo import OpenMeteoTool
+from examples.helpers.io import prompt_input
+
+
+async def main() -> None:
+    chat_model: ChatModel = await ChatModel.from_name("ollama:granite3.1-dense:8b")
+
+    agent = BeeAgent(
+        BeeInput(
+            llm=chat_model, tools=[OpenMeteoTool(), DuckDuckGoSearchTool(max_results=3)], memory=UnconstrainedMemory()
+        )
+    )
+
+    prompt = prompt_input(default="How is the weather in White Plains?")
+
+    async def update_callback(data: dict, event: EventMeta) -> None:
+        print(f"Agent({data['update']['key']}) 🤖 : ", data["update"]["parsedValue"])
+
+    async def observe(emitter: Emitter) -> None:
+        emitter.on("update", update_callback)
+
+    output: BeeRunOutput = await agent.run(
+        {"prompt": prompt},
+        {"execution": {"total_max_retries": 2, "max_retries_per_step": 3, "max_iterations": 8}},
+    ).observe(observe)
+
+    print("Agent 🤖 : ", output.result.text)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/examples/agents/memory.py b/python/examples/agents/memory.py
new file mode 100644
index 00000000..cf9b0dec
--- /dev/null
+++ b/python/examples/agents/memory.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from beeai import LLM
+
+from beeai_framework.agents.bee import BeeAgent
+from beeai_framework.agents.types import BeeInput, BeeRunInput
+from beeai_framework.backend.message import UserMessage
+from beeai_framework.memory import UnconstrainedMemory
+
+
+async def main() -> None:
+    memory = UnconstrainedMemory()
+    await memory.add(UserMessage(content="Who invented the wheel and why?"))
+    agent = BeeAgent(BeeInput(llm=LLM("ollama/llama3.1"), tools=[], memory=memory))
+
+    result = await agent.run(BeeRunInput())
+
+    print(result.result.text)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/examples/agents/requirements.txt b/python/examples/agents/requirements.txt
new file mode 100644
index 00000000..18a7c192
--- /dev/null
+++ b/python/examples/agents/requirements.txt
@@ -0,0 +1,2 @@
+wikipedia
+langchain_community
diff --git a/python/examples/agents/simple.py b/python/examples/agents/simple.py
new file mode 100644
index 00000000..260f4fb8
--- /dev/null
+++ b/python/examples/agents/simple.py
@@ -0,0 +1,20 @@
+import asyncio
+
+from beeai_framework.agents.bee.agent import BeeAgent
+from beeai_framework.agents.types import BeeInput, BeeRunInput, BeeRunOutput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory
+from beeai_framework.tools.weather.openmeteo import OpenMeteoTool
+
+
+async def main() -> None:
+    llm = await ChatModel.from_name("ollama:granite3.1-dense:8b")
+    agent = BeeAgent(bee_input=BeeInput(llm=llm, tools=[OpenMeteoTool()], memory=UnconstrainedMemory()))
+
+    result: BeeRunOutput = await agent.run(run_input=BeeRunInput(prompt="How is the weather in White Plains?"))
+
+    print(result.result.text)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/examples/backend/providers/ollama.py b/python/examples/backend/providers/ollama.py
new file mode 100644
index 00000000..420c60d1
--- /dev/null
+++ b/python/examples/backend/providers/ollama.py
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from pydantic import BaseModel, Field
+from pyventus import EventLinker
+
+from beeai_framework.adapters.ollama.backend.chat import OllamaChatModel
+from beeai_framework.backend.chat import ChatModel, ChatModelOutput
+from beeai_framework.backend.message import UserMessage
+from beeai_framework.cancellation import AbortSignal
+from beeai_framework.parsers.line_prefix import LinePrefixParser, Prefix
+
+
+async def ollama_from_name() -> None:
+    llm = await ChatModel.from_name("ollama:llama3.1")
+    user_message = UserMessage("what states are part of New England?")
+    response = await llm.create({"messages": [user_message]})
+    print(response.get_text_content())
+
+
+async def ollama_granite_from_name() -> None:
+    llm = await ChatModel.from_name("ollama:granite3.1-dense:8b")
+    user_message = UserMessage("what states are part of New England?")
+    response = await llm.create({"messages": [user_message]})
+    print(response.get_text_content())
+
+
+async def ollama_sync() -> None:
+    llm = OllamaChatModel("llama3.1")
+    user_message = UserMessage("what is the capital of Massachusetts?")
+    response = await llm.create({"messages": [user_message]})
+    print(response.get_text_content())
+
+
+async def ollama_stream() -> None:
+    llm = OllamaChatModel("llama3.1")
+    user_message = UserMessage("How many islands make up the country of Cape Verde?")
+    response = await llm.create({"messages": [user_message], "stream": True})
+    print(response.get_text_content())
+
+
+async def ollama_stream_abort() -> None:
+    llm = OllamaChatModel("llama3.1")
+    user_message = UserMessage("What is the smallest of the Cape Verde islands?")
+    response = await llm.create({"messages": [user_message], "stream": True, "abort_signal": AbortSignal.timeout(0.5)})
+
+    if response is not None:
+        print(response.get_text_content())
+    else:
+        print("No response returned.")
+
+
+async def ollama_structure() -> None:
+    class TestSchema(BaseModel):
+        answer: str = Field(description="your final answer")
+
+    llm = OllamaChatModel("llama3.1")
+    user_message = UserMessage("How many islands make up the country of Cape Verde?")
+    response = await llm.create_structure(
+        {
+            "schema": TestSchema,
+            "messages": [user_message],
+        }
+    )
+    print(response.object)
+
+
+async def ollama_stream_parser() -> None:
+    llm = OllamaChatModel("llama3.1")
+
+    parser = LinePrefixParser(prefixes=[Prefix(name="test", line_prefix="Prefix: ")])
+
+    @EventLinker.on("newToken")
+    async def listener(data: dict[str, ChatModelOutput]) -> None:
+        output: ChatModelOutput = data["value"]
+        for result in parser.feed(output.get_text_content()):
+            if result is not None:
+                print(result.prefix.name, result.content)
+
+    user_message = UserMessage("Produce 3 lines each starting with 'Prefix: ' followed by a sentence and a new line.")
+    await llm.create({"messages": [user_message], "stream": True})
+
+    # Pick up any remaining lines in parser buffer
+    for result in parser.finalize():
+        if result is not None:
+            print(result.prefix.name, result.content)
+
+
+async def main() -> None:
+    print("*" * 10, "ollama_from_name")
+    await ollama_from_name()
+    print("*" * 10, "ollama_granite_from_name")
+    await ollama_granite_from_name()
+    print("*" * 10, "ollama_sync")
+    await ollama_sync()
+    print("*" * 10, "ollama_stream")
+    await ollama_stream()
+    print("*" * 10, "ollama_stream_abort")
+    await ollama_stream_abort()
+    print("*" * 10, "ollama_structure")
+    await ollama_structure()
+    print("*" * 10, "ollama_stream_parser")
+    await ollama_stream_parser()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/examples/backend/providers/watsonx.py b/python/examples/backend/providers/watsonx.py
new file mode 100644
index 00000000..9787d835
--- /dev/null
+++ b/python/examples/backend/providers/watsonx.py
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from pydantic import BaseModel, Field
+
+from beeai_framework.adapters.watsonx.backend.chat import WatsonxChatModel
+from beeai_framework.backend.message import UserMessage
+from beeai_framework.cancellation import AbortSignal
+
+# Settings can be passed here during initiation or pre-configured via environment variables
+llm = WatsonxChatModel(
+    "ibm/granite-3-8b-instruct",
+    # project_id="WATSONX_PROJECT_ID",
+    # api_key="WATSONX_API_KEY",
+    # api_base="WATSONX_API_URL",
+)
+
+
+async def watsonx_from_name() -> None:
+    watsonx_llm = await WatsonxChatModel.from_name(
+        "watsonx:ibm/granite-3-8b-instruct",
+        {
+            # "project_id": "WATSONX_PROJECT_ID",
+            # "api_key": "WATSONX_API_KEY",
+            # "api_base": "WATSONX_API_URL",
+        },
+    )
+    user_message = UserMessage("what states are part of New England?")
+    response = await watsonx_llm.create({"messages": [user_message]})
+    print(response.get_text_content())
+
+
+async def watsonx_sync() -> None:
+    user_message = UserMessage("what is the capital of Massachusetts?")
+    response = await llm.create({"messages": [user_message]})
+    print(response.get_text_content())
+
+
+async def watsonx_stream() -> None:
+    user_message = UserMessage("How many islands make up the country of Cape Verde?")
+    response = await llm.create({"messages": [user_message], "stream": True})
+    print(response.get_text_content())
+
+
+async def watsonx_stream_abort() -> None:
+    user_message = UserMessage("What is the smallest of the Cape Verde islands?")
+    response = await llm.create({"messages": [user_message], "stream": True, "abort_signal": AbortSignal.timeout(0.5)})
+
+    if response is not None:
+        print(response.get_text_content())
+    else:
+        print("No response returned.")
+
+
+async def watsonx_structure() -> None:
+    class TestSchema(BaseModel):
+        answer: str = Field(description="your final answer")
+
+    user_message = UserMessage("How many islands make up the country of Cape Verde?")
+    response = await llm.create_structure(
+        {
+            "schema": TestSchema,
+            "messages": [user_message],
+        }
+    )
+    print(response.object)
+
+
+async def main() -> None:
+    print("*" * 10, "watsonx_from_name")
+    await watsonx_from_name()
+    print("*" * 10, "watsonx_sync")
+    await watsonx_sync()
+    print("*" * 10, "watsonx_stream")
+    await watsonx_stream()
+    print("*" * 10, "watsonx_stream_abort")
+    await watsonx_stream_abort()
+    print("*" * 10, "watsonx_structure")
+    await watsonx_structure()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/examples/basic.py b/python/examples/basic.py
new file mode 100644
index 00000000..58080812
--- /dev/null
+++ b/python/examples/basic.py
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from beeai_framework.agents.bee.agent import BeeAgent
+from beeai_framework.agents.types import BeeInput, BeeRunInput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory
+
+
+async def main() -> None:
+    chat_model = await ChatModel.from_name("ollama:llama3.1")
+
+    agent = BeeAgent(BeeInput(llm=chat_model, tools=[], memory=UnconstrainedMemory()))
+
+    result = await agent.run(BeeRunInput(prompt="What is the capital of Massachusetts?"))
+
+    print("answer:", result.result.text)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/examples/helpers/io.py b/python/examples/helpers/io.py
new file mode 100644
index 00000000..e2c22461
--- /dev/null
+++ b/python/examples/helpers/io.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: Apache-2.0
+
+
+def prompt_input(default: str | None = None) -> str:
+    prompt: str = ""
+
+    while prompt == "":
+        user_input = input("User 👤 : ").strip()
+        if user_input:
+            prompt = user_input
+            break
+        elif default:
+            prompt = default
+            print("Empty prompt is not allowed. Using example prompt:\n", default)
+            break
+        else:
+            print("Error: Empty prompt is not allowed.
Please try again.") + + return prompt diff --git a/python/examples/llms.py b/python/examples/llms.py new file mode 100644 index 00000000..cafb7524 --- /dev/null +++ b/python/examples/llms.py @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: Apache-2.0 + +import asyncio +import sys + +from beeai import BeeAgent +from dotenv import load_dotenv + +from beeai_framework.agents.types import BeeInput, BeeRunInput +from beeai_framework.backend.chat import ChatModel +from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory + +LLMS = { + "ollama": "ollama:llama3.1", + "watsonx": "watsonx:ibm/granite-3-8b-instruct", +} + +HELP = """ +Usage: + examples.beeai_framework.llms +Arguments + `ollama` - requires local ollama service running (i.e., http://127.0.0.1:11434) + `watsonx` - requires environment variable + - WATSONX_URL - base URL of your WatsonX instance + and one of the following + - WATSONX_APIKEY: API key + - WATSONX_TOKEN: auth token +""" + + +async def main(model: str) -> None: + chat_model = await ChatModel.from_name(model) + bee_input = BeeInput(llm=chat_model, tools=[], memory=UnconstrainedMemory()) + agent = BeeAgent(bee_input) + + run_input = BeeRunInput(prompt="What is the smallest of the Cabo Verde islands?") + result = await agent.run(run_input) + + print("answer:", result.result.text) + + +if __name__ == "__main__": + if len(sys.argv) < 2 or sys.argv[1] == "--help": + print(HELP) + else: + load_dotenv() + model = LLMS.get(sys.argv[1]) + if model: + asyncio.run(main(model)) + else: + print(f"Unknown provider: {sys.argv[1]}\n{HELP}") diff --git a/python/examples/memory/agentMemory.py b/python/examples/memory/agentMemory.py new file mode 100644 index 00000000..e7f56974 --- /dev/null +++ b/python/examples/memory/agentMemory.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: Apache-2.0 + +import asyncio + +from beeai_framework.agents.bee.agent import BeeAgent +from beeai_framework.agents.types import BeeInput, BeeRunInput +from beeai_framework.backend.chat import ChatModel +from beeai_framework.backend.message import AssistantMessage, UserMessage +from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory + +# Initialize the memory and LLM +memory = UnconstrainedMemory() + + +async def create_agent() -> BeeAgent: + llm = await ChatModel.from_name("ollama:granite3.1-dense:8b") + + # Initialize the agent + agent = BeeAgent(BeeInput(llm=llm, memory=memory, tools=[])) + + return agent + + +async def main() -> None: + try: + # Create user message + user_input = "Hello world!" 
+ user_message = UserMessage(user_input) + + # Await adding user message to memory + await memory.add(user_message) + print("Added user message to memory") + + # Create agent + agent = await create_agent() + + response = await agent.run( + BeeRunInput( + prompt=user_input, + options={ + "execution": { + "max_retries_per_step": 3, + "total_max_retries": 10, + "max_iterations": 20, + } + }, + ) + ) + print(f"Received response: {response}") + + # Create and store assistant's response + assistant_message = AssistantMessage(response.result.text) + + # Await adding assistant message to memory + await memory.add(assistant_message) + print("Added assistant message to memory") + + # Print results + print(f"\nMessages in memory: {len(agent.memory.messages)}") + + if len(agent.memory.messages) >= 1: + user_msg = agent.memory.messages[0] + print(f"User: {user_msg.text}") + + if len(agent.memory.messages) >= 2: + agent_msg = agent.memory.messages[1] + print(f"Agent: {agent_msg.text}") + else: + print("No agent message found in memory") + + except Exception as e: + print(f"An error occurred: {e!s}") + import traceback + + print(traceback.format_exc()) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/examples/memory/slidingMemory.py b/python/examples/memory/slidingMemory.py new file mode 100644 index 00000000..5dc2fec0 --- /dev/null +++ b/python/examples/memory/slidingMemory.py @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: Apache-2.0 + +import asyncio + +from beeai_framework.backend import Message, Role +from beeai_framework.memory.sliding_memory import SlidingMemory, SlidingMemoryConfig + + +async def main() -> None: + try: + # Create sliding memory with size 3 + memory = SlidingMemory( + SlidingMemoryConfig( + size=3, + handlers={"removal_selector": lambda messages: messages[0]}, # Remove oldest message + ) + ) + + # Add messages + await memory.add(Message.of({"role": Role.SYSTEM, "text": "You are a helpful assistant."})) + + await memory.add(Message.of({"role": Role.USER, "text": "What is Python?"})) + + await memory.add(Message.of({"role": Role.ASSISTANT, "text": "Python is a programming language."})) + + # Adding a fourth message should trigger sliding window + await memory.add(Message.of({"role": Role.USER, "text": "What about JavaScript?"})) + + # Print results + print(f"Messages in memory: {len(memory.messages)}") # Should print 3 + for msg in memory.messages: + print(f"{msg.role}: {msg.text}") + + except Exception as e: + print(f"An error occurred: {e!s}") + import traceback + + print(traceback.format_exc()) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/examples/memory/summarizeMemory.py b/python/examples/memory/summarizeMemory.py new file mode 100644 index 00000000..cd3627bb --- /dev/null +++ b/python/examples/memory/summarizeMemory.py @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: Apache-2.0 + +import asyncio + +from beeai_framework.backend.chat import ChatModel +from beeai_framework.backend.message import AssistantMessage, SystemMessage, UserMessage +from beeai_framework.memory.summarize_memory import SummarizeMemory + + +async def main() -> None: + try: + # Initialize the LLM with parameters + llm = await ChatModel.from_name( + "ollama:granite3.1-dense:8b", + # ChatModelParameters(temperature=0\), + ) + + # Create summarize memory instance + memory = SummarizeMemory(llm) + + # Add messages + await memory.add_many( + [ + SystemMessage("You are a guide through France."), + UserMessage("What is the capital?"), + AssistantMessage("Paris"), + 
UserMessage("What language is spoken there?"), + ] + ) + + # Print results + print(f"Is Empty: {memory.is_empty()}") + print(f"Message Count: {len(memory.messages)}") + + if memory.messages: + print(f"Summary: {memory.messages[0].get_texts()[0].get('text')}") + + except Exception as e: + print(f"An error occurred: {e!s}") + import traceback + + print(traceback.format_exc()) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/examples/memory/tokenMemory.py b/python/examples/memory/tokenMemory.py new file mode 100644 index 00000000..b5679dd0 --- /dev/null +++ b/python/examples/memory/tokenMemory.py @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: Apache-2.0 + +import asyncio +import math + +from beeai import LLM + +from beeai_framework.backend import Message, Role +from beeai_framework.memory import TokenMemory + +# Initialize the LLM +llm = LLM() + +# Initialize TokenMemory with handlers +memory = TokenMemory( + llm=llm, + max_tokens=None, # Will be inferred from LLM + capacity_threshold=0.75, + sync_threshold=0.25, + handlers={ + "removal_selector": lambda messages: next((msg for msg in messages if msg.role != Role.SYSTEM), messages[0]), + "estimate": lambda msg: math.ceil((len(msg.role) + len(msg.text)) / 4), + }, +) + + +async def main() -> None: + try: + # Add system message + system_message = Message.of({"role": Role.SYSTEM, "text": "You are a helpful assistant."}) + await memory.add(system_message) + print(f"Added system message (hash: {hash(system_message)})") + + # Add user message + user_message = Message.of({"role": Role.USER, "text": "Hello world!"}) + await memory.add(user_message) + print(f"Added user message (hash: {hash(user_message)})") + + # Check initial memory state + print("\nInitial state:") + print(f"Is Dirty: {memory.is_dirty}") + print(f"Tokens Used: {memory.tokens_used}") + + # Sync token counts + await memory.sync() + print("\nAfter sync:") + print(f"Is Dirty: {memory.is_dirty}") + print(f"Tokens Used: {memory.tokens_used}") + + # Print all messages + print("\nMessages in memory:") + for msg in memory.messages: + print(f"{msg.role}: {msg.text} (hash: {hash(msg)})") + + except Exception as e: + print(f"An error occurred: {e!s}") + import traceback + + print(traceback.format_exc()) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/examples/memory/unconstrainedMemory.py b/python/examples/memory/unconstrainedMemory.py new file mode 100644 index 00000000..004d8f3d --- /dev/null +++ b/python/examples/memory/unconstrainedMemory.py @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: Apache-2.0 + +import asyncio + +from beeai_framework.backend import Message, Role +from beeai_framework.memory import UnconstrainedMemory + + +async def main() -> None: + try: + # Create memory instance + memory = UnconstrainedMemory() + + # Add a message + await memory.add(Message.of({"role": Role.USER, "text": "Hello world!"})) + + # Print results + print(f"Is Empty: {memory.is_empty()}") # Should print: False + print(f"Message Count: {len(memory.messages)}") # Should print: 1 + + print("\nMessages:") + for msg in memory.messages: + print(f"{msg.role}: {msg.text}") + + except Exception as e: + print(f"An error occurred: {e!s}") + import traceback + + print(traceback.format_exc()) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/examples/notebooks/README.md b/python/examples/notebooks/README.md new file mode 100644 index 00000000..901bf7b4 --- /dev/null +++ b/python/examples/notebooks/README.md @@ -0,0 +1,36 @@ +# BeeAI Python 
Notebook Examples
+
+These examples organize agentic topics from the BeeAI framework for Python into structured groups, making it easier to learn the framework.
+
+## Prerequisites
+
+We do not assume any particular method/software for executing these notebooks. The minimum requirements are contained within the `requirements.txt` files. These can be installed after activating your Python virtual environment, as follows:
+
+```shell
+pip install -r examples/requirements.txt
+pip install -r examples/notebooks/requirements.txt
+```
+
+> [!NOTE]
+> You can find out how to activate your Python virtual environment in the main [README](/README.md) of this repository.
+
+If you're not familiar with how to get started with Jupyter Notebooks, you can read the [documentation](https://docs.jupyter.org). As a quick start, you can try using Jupyter Lab, as follows:
+
+```shell
+pip install jupyterlab
+jupyter-lab examples/notebooks/basics.ipynb
+```
+
+## Notebooks
+
+The basics notebook introduces the core constructs provided by the BeeAI framework, such as PromptTemplates, Messages, and Memory, and shows how to set up and generate output using a ChatModel.
+
+[basics.ipynb](basics.ipynb)
+
+The workflows notebook describes how to use BeeAI Workflows to build AI agents of various complexities. This notebook builds on topics discussed in [basics.ipynb](basics.ipynb).
+
+[workflows.ipynb](workflows.ipynb)
+
+Finally, the agents notebook describes how to set up and use the BeeAI ReActAgent. This is a pre-canned ReAct agent that can be configured with tools and instructions to solve problems.
+
+[agents.ipynb](agents.ipynb)
diff --git a/python/examples/notebooks/agents.ipynb b/python/examples/notebooks/agents.ipynb
new file mode 100644
index 00000000..25b2cbc2
--- /dev/null
+++ b/python/examples/notebooks/agents.ipynb
@@ -0,0 +1,283 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# BeeAI ReAct agents\n",
+    "\n",
+    "The BeeAI ReAct agent is a pre-canned ReAct agent implementation that can be configured with tools and instructions.\n",
+    "\n",
+    "The ReAct pattern, short for Reasoning and Acting, is a framework used in AI models, particularly language models, that separates the reasoning process from the action-taking process.\n",
+    "It enhances the model's ability to handle complex queries by allowing it to reason about the problem, decide on an action, and then observe the result of that action to inform further reasoning and actions.\n",
+    "\n",
+    "The ReAct agent provides a convenient out-of-the-box agent implementation."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Basic ReAct Agent\n",
+    "\n",
+    "To configure a ReAct agent, you will need to define a ChatModel and construct a BeeAgent.\n",
+    "\n",
+    "In this example we do not provide any tools to the agent, so it will attempt to provide an answer from its own knowledge.\n",
+    "\n",
+    "Try changing the text input in the call to `agent.run()` to experiment with getting different answers.",
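+    "\n",
+    "\n",
+    "For example, any natural-language prompt can be substituted here (a hypothetical alternative is shown below):\n",
+    "\n",
+    "```python\n",
+    "result = await agent.run(run_input=BeeRunInput(prompt=\"Why is the sky blue?\"))\n",
+    "```"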
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import warnings\n",
+    "\n",
+    "from beeai_framework.agents.bee.agent import BeeAgent\n",
+    "from beeai_framework.agents.types import BeeInput, BeeRunInput, BeeRunOutput\n",
+    "from beeai_framework.backend.chat import ChatModel\n",
+    "from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory\n",
+    "\n",
+    "warnings.simplefilter(\"ignore\", UserWarning)\n",
+    "\n",
+    "# Construct ChatModel\n",
+    "chat_model: ChatModel = await ChatModel.from_name(\"ollama:granite3.1-dense:8b\")\n",
+    "\n",
+    "# Construct Agent instance with the chat model\n",
+    "agent = BeeAgent(bee_input=BeeInput(llm=chat_model, tools=[], memory=UnconstrainedMemory()))\n",
+    "\n",
+    "# Run the agent\n",
+    "result: BeeRunOutput = await agent.run(run_input=BeeRunInput(prompt=\"What chemical elements make up a water molecule?\"))\n",
+    "\n",
+    "print(result.result.text)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using Tools\n",
+    "\n",
+    "To move beyond chatting with an LLM, tools can be provided to the agent. There are several ways to do this: using built-in tools from the framework, importing tools from other libraries, or writing your own custom tools.\n",
+    "\n",
+    "### Built-in Tools\n",
+    "\n",
+    "BeeAI comes with some built-in tools that are provided as part of the library. These can easily be imported and added to the agent.\n",
+    "\n",
+    "With tools, your agent is capable of a little more than the underlying Large Language Model is by itself, as it can rely on tools to fetch additional knowledge and context from elsewhere.\n",
+    "\n",
+    "In this example we give the agent a weather forecast lookup tool called OpenMeteoTool."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from beeai_framework.backend.chat import ChatModel\n",
+    "from beeai_framework.tools.weather.openmeteo import OpenMeteoTool\n",
+    "\n",
+    "chat_model: ChatModel = await ChatModel.from_name(\"ollama:granite3.1-dense:8b\")\n",
+    "\n",
+    "# create an agent using the default LLM and add the OpenMeteoTool that is capable of fetching weather-based information\n",
+    "agent = BeeAgent(bee_input=BeeInput(llm=chat_model, tools=[OpenMeteoTool()], memory=UnconstrainedMemory()))\n",
+    "\n",
+    "result: BeeRunOutput = await agent.run(run_input=BeeRunInput(prompt=\"What's the current weather in London?\"))\n",
+    "\n",
+    "print(result.result.text)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Custom Tools\n",
+    "\n",
+    "Tools can be written from scratch and added to your agent.\n",
+    "\n",
+    "Use the `@tool` decorator on your tool function and pass it into the agent in the same way as built-in tools.",
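+    "\n",
+    "\n",
+    "A minimal sketch of the shape a decorated tool takes (`my_tool` is a hypothetical placeholder; the docstring doubles as the tool description shown to the agent):\n",
+    "\n",
+    "```python\n",
+    "@tool\n",
+    "def my_tool(query: str) -> str:\n",
+    "    \"\"\"One-line description of what this tool does.\"\"\"\n",
+    "    return f\"result for {query}\"\n",
+    "```"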
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "from urllib.parse import quote\n",
+    "\n",
+    "import requests\n",
+    "from beeai import BeeAgent, tool\n",
+    "\n",
+    "from beeai_framework.backend.chat import ChatModel\n",
+    "from beeai_framework.tools.tool import StringToolOutput\n",
+    "from beeai_framework.tools.weather.openmeteo import OpenMeteoTool\n",
+    "\n",
+    "\n",
+    "# defining a tool using the `tool` decorator\n",
+    "# Note: the docstring is important as it serves as the tool description to the agent\n",
+    "@tool\n",
+    "def basic_calculator(expression: str) -> StringToolOutput:\n",
+    "    \"\"\"\n",
+    "    A calculator tool that performs mathematical operations.\n",
+    "\n",
+    "    Args:\n",
+    "        expression: The mathematical expression to evaluate (e.g., \"2 + 3 * 4\").\n",
+    "\n",
+    "    Returns:\n",
+    "        The result of the mathematical expression\n",
+    "    \"\"\"\n",
+    "    try:\n",
+    "        encoded_expression = quote(expression)\n",
+    "        math_url = f\"https://newton.vercel.app/api/v2/simplify/{encoded_expression}\"\n",
+    "\n",
+    "        response = requests.get(\n",
+    "            math_url,\n",
+    "            headers={\"Accept\": \"application/json\"},\n",
+    "        )\n",
+    "        response.raise_for_status()\n",
+    "        return StringToolOutput(json.dumps(response.json()))\n",
+    "\n",
+    "    except Exception as e:\n",
+    "        raise RuntimeError(f\"Error evaluating expression: {e!s}\") from e\n",
+    "\n",
+    "\n",
+    "chat_model: ChatModel = await ChatModel.from_name(\"ollama:granite3.1-dense:8b\")\n",
+    "agent = BeeAgent(bee_input=BeeInput(llm=chat_model, tools=[basic_calculator], memory=UnconstrainedMemory()))\n",
+    "result: BeeRunOutput = await agent.run(run_input=BeeRunInput(prompt=\"What is the square root of 36?\"))\n",
+    "print(result.result.text)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Imported Tools\n",
+    "\n",
+    "Tools can be imported from other libraries.\n",
+    "\n",
+    "The example below shows how to integrate a tool from LangChain. It also demonstrates long-form tool writing without the use of the `@tool` decorator (see below for how this can be rewritten in short form using the `@tool` decorator)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import Any\n",
+    "\n",
+    "from beeai import BeeAgent\n",
+    "from langchain_community.tools import WikipediaQueryRun\n",
+    "from langchain_community.utilities import WikipediaAPIWrapper\n",
+    "from pydantic import BaseModel, Field\n",
+    "\n",
+    "from beeai_framework.tools import Tool\n",
+    "\n",
+    "\n",
+    "class LangChainWikipediaToolInput(BaseModel):\n",
+    "    query: str = Field(description=\"The topic or question to search for on Wikipedia.\")\n",
+    "\n",
+    "\n",
+    "class LangChainWikipediaTool(Tool):\n",
+    "    \"\"\"Adapter class to integrate LangChain's Wikipedia tool with our framework\"\"\"\n",
+    "\n",
+    "    name = \"Wikipedia\"\n",
+    "    description = \"Search factual and historical information from Wikipedia about given topics.\"\n",
+    "    input_schema = LangChainWikipediaToolInput\n",
+    "\n",
+    "    def __init__(self) -> None:\n",
+    "        super().__init__()\n",
+    "        wikipedia = WikipediaAPIWrapper()\n",
+    "        self.wikipedia = WikipediaQueryRun(api_wrapper=wikipedia)\n",
+    "\n",
+    "    def _run(self, input: LangChainWikipediaToolInput, _: Any | None = None) -> StringToolOutput:\n",
+    "        query = input.query\n",
+    "        try:\n",
+    "            result = self.wikipedia.run(query)\n",
+    "            return StringToolOutput(result=result)\n",
+    "        except Exception as e:\n",
+    "            print(f\"Wikipedia search error: {e!s}\")\n",
+    "            return StringToolOutput(result=f\"Error searching Wikipedia: {e!s}\")\n",
+    "\n",
+    "\n",
+    "chat_model: ChatModel = await ChatModel.from_name(\"ollama:granite3.1-dense:8b\")\n",
+    "agent = BeeAgent(bee_input=BeeInput(llm=chat_model, tools=[LangChainWikipediaTool()], memory=UnconstrainedMemory()))\n",
+    "result: BeeRunOutput = await agent.run(\n",
+    "    run_input=BeeRunInput(prompt=\"Who is the current president of the European Commission?\")\n",
+    ")\n",
+    "print(result.result.text)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The above example can be rewritten in shorter form by adding the `@tool` decorator."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from beeai import BeeAgent, tool\n",
+    "from langchain_community.tools import WikipediaQueryRun  # noqa: F811\n",
+    "from langchain_community.utilities import WikipediaAPIWrapper  # noqa: F811\n",
+    "\n",
+    "\n",
+    "# defining a tool using the `tool` decorator\n",
+    "# Note: the docstring is important as it serves as the tool description to the agent\n",
+    "@tool\n",
+    "def langchain_wikipedia_tool(expression: str) -> StringToolOutput:\n",
+    "    \"\"\"\n",
+    "    Search factual and historical information, including biography, history, politics, geography, society, culture,\n",
+    "    science, technology, people, animal species, mathematics, and other subjects.\n",
+    "\n",
+    "    Args:\n",
+    "        expression: The topic or question to search for on Wikipedia.\n",
+    "\n",
+    "    Returns:\n",
+    "        The information found via searching Wikipedia.\n",
+    "    \"\"\"\n",
+    "    wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())\n",
+    "    return StringToolOutput(wikipedia.run(expression))\n",
+    "\n",
+    "\n",
+    "# using the tool in an agent\n",
+    "chat_model: ChatModel = await ChatModel.from_name(\"ollama:granite3.1-dense:8b\")\n",
+    "agent = BeeAgent(bee_input=BeeInput(llm=chat_model, tools=[langchain_wikipedia_tool], memory=UnconstrainedMemory()))\n",
+    "result: BeeRunOutput = await agent.run(\n",
+    "    run_input=BeeRunInput(prompt=\"Who is the current president of the European Commission?\")\n",
+    ")\n",
+    "print(result.result.text)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "beeai-iRW9JlkS-py3.12",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/examples/notebooks/basics.ipynb b/python/examples/notebooks/basics.ipynb
new file mode 100644
index 00000000..ab1d7e2e
--- /dev/null
+++ b/python/examples/notebooks/basics.ipynb
@@ -0,0 +1,350 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# BeeAI Framework Basics\n",
+    "\n",
+    "These examples show some of the basic usage patterns of BeeAI in Python. They gradually build in complexity to give a rounded overview."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Prompt Templates\n",
+    "\n",
+    "One of the most basic constructs provided by the BeeAI framework is the `PromptTemplate`. Using a PromptTemplate, you can incorporate data into a prompt before sending it to a language model.\n",
+    "Prompt templates are based on the Mustache templating language.\n",
+    "\n",
+    "The following example shows you how to create a RAG (Retrieval-Augmented Generation) template and apply the template to your data to generate a prompt."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import warnings\n",
+    "\n",
+    "from pydantic import BaseModel\n",
+    "\n",
+    "from beeai_framework.utils.templates import PromptTemplate\n",
+    "\n",
+    "warnings.simplefilter(\"ignore\", UserWarning)\n",
+    "\n",
+    "\n",
+    "# The input schema model: defines the structure of the input data that can be passed to the template\n",
+    "class RAGTemplateInput(BaseModel):\n",
+    "    question: str\n",
+    "    context: str\n",
+    "\n",
+    "\n",
+    "# Define the template\n",
+    "rag_template: PromptTemplate = PromptTemplate(\n",
+    "    schema=RAGTemplateInput,\n",
+    "    template=\"\"\"\n",
+    "Context: {{context}}\n",
+    "Question: {{question}}\n",
+    "\n",
+    "Provide a concise answer based on the context. Avoid statements such as 'Based on the context' or 'According to the context' etc. \"\"\",\n",
+    ")\n",
+    "\n",
+    "# Render the template using an instance of the input model\n",
+    "prompt = rag_template.render(\n",
+    "    RAGTemplateInput(\n",
+    "        question=\"What is the capital of France?\",\n",
+    "        context=\"France is a country in Europe. Its capital city is Paris, known for its culture and history.\",\n",
+    "    )\n",
+    ")\n",
+    "\n",
+    "print(prompt)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## More complex templates\n",
+    "\n",
+    "That was a simple template, but the `PromptTemplate` class can also be used to render more complex objects and include conditional logic.\n",
+    "\n",
+    "The next example is a template that includes a question and a set of complex search results."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from pydantic import BaseModel\n",
+    "\n",
+    "from beeai_framework.utils.templates import PromptTemplate\n",
+    "\n",
+    "\n",
+    "class SearchResult(BaseModel):\n",
+    "    title: str\n",
+    "    url: str\n",
+    "    content: str\n",
+    "\n",
+    "\n",
+    "class SearchTemplateInput(BaseModel):\n",
+    "    question: str\n",
+    "    results: list[SearchResult]\n",
+    "\n",
+    "\n",
+    "# Define the template\n",
+    "search_template: PromptTemplate = PromptTemplate(\n",
+    "    schema=SearchTemplateInput,\n",
+    "    template=\"\"\"\n",
+    "Search results:\n",
+    "{{#results.0}}\n",
+    "{{#results}}\n",
+    "Title: {{title}}\n",
+    "Url: {{url}}\n",
+    "Content: {{content}}\n",
+    "{{/results}}\n",
+    "{{/results.0}}\n",
+    "\n",
+    "Question: {{question}}\n",
+    "Provide a concise answer based on the search results provided.\"\"\",\n",
+    ")\n",
+    "\n",
+    "# Render the template using an instance of the input model\n",
+    "prompt = search_template.render(\n",
+    "    SearchTemplateInput(\n",
+    "        question=\"What is the capital of France?\",\n",
+    "        results=[\n",
+    "            SearchResult(\n",
+    "                title=\"France\",\n",
+    "                url=\"https://en.wikipedia.org/wiki/France\",\n",
+    "                content=\"France is a country in Europe. Its capital city is Paris, known for its culture and history.\",\n",
+    "            )\n",
+    "        ],\n",
+    "    )\n",
+    ")\n",
+    "\n",
+    "print(prompt)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## The ChatModel\n",
+    "\n",
+    "Once you have your PromptTemplate, you can start to think about prompting a model. BeeAI supports a variety of LLMs that you can use via the `ChatModel` interface.\n",
\n", + "\n", + "In this section we will use the `IBM Granite 3.1 8B` language model via the Ollama provider.\n", + "\n", + "[How to run Granite 3.1 using Ollama](https://www.ibm.com/granite/docs/run/granite-on-mac/granite/).\n", + "\n", + "Before creating a ChatModel we need to briefly cover Messages. The `ChatModel` interface operates using messages. Using messages you can represent a chat between the user and the assistant (the LLM) which is a convenient interaction method. Lets start by creating a `UserMessage` to say hello and ask a simple question." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from beeai_framework.backend.message import UserMessage\n", + "\n", + "# Create a user message to start a chat with the model\n", + "user_message = UserMessage(content=\"Hello! Can you tell me what is the capital of France?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now create a `ChatModel` and send this message to Granite." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from beeai_framework.backend.chat import ChatModel, ChatModelInput, ChatModelOutput\n", + "\n", + "# Create a ChatModel to interface with granite3.1-dense:8b on a local ollama\n", + "model = await ChatModel.from_name(\"ollama:granite3.1-dense:8b\")\n", + "\n", + "output: ChatModelOutput = await model.create(ChatModelInput(messages=[user_message]))\n", + "\n", + "print(output.get_text_content())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Memory \n", + "The model has provided a response! We can now start to build up a `Memory`. Memory is just a convenient way of storing a set of messages that can be considered as the history of the dialog between the user and the llm.\n", + "\n", + "In this next example we will construct a memory from our existing messages and add a new user message. Notice that the new message can implicitly refer to content from prior messages. Internally the `ChatModel` formats all the messages and sends them to the LLM." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from beeai_framework.backend.message import AssistantMessage\n", + "from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory\n", + "\n", + "memory = UnconstrainedMemory()\n", + "\n", + "await memory.add_many(\n", + " [\n", + " user_message,\n", + " AssistantMessage(content=output.get_text_content()),\n", + " UserMessage(content=\"If you had to recommend one thing to do there, what would it be?\"),\n", + " ]\n", + ")\n", + "\n", + "output: ChatModelOutput = await model.create(ChatModelInput(messages=memory.messages))\n", + "\n", + "print(output.get_text_content())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Combining Templates and Messages\n", + "\n", + "If you would like to use a `PromptTemplate` from earlier with the Granite ChatModel, you can render the template and then put the content into a Message." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Some context that the model will use to provide an answer. 
+    "context = \"\"\"The geography of Ireland comprises relatively low-lying mountains surrounding a central plain, with several navigable rivers extending inland.\n",
+    "Its lush vegetation is a product of its mild but changeable climate which is free of extremes in temperature.\n",
+    "Much of Ireland was woodland until the end of the Middle Ages. Today, woodland makes up about 10% of the island,\n",
+    "compared with a European average of over 33%, with most of it being non-native conifer plantations.\n",
+    "The Irish climate is influenced by the Atlantic Ocean and thus very moderate, and winters are milder than expected for such a northerly area,\n",
+    "although summers are cooler than those in continental Europe. Rainfall and cloud cover are abundant.\n",
+    "\"\"\"\n",
+    "\n",
+    "# Let's reuse our RAG template from earlier!\n",
+    "prompt = rag_template.render(RAGTemplateInput(question=\"How much of Ireland is forested?\", context=context))\n",
+    "\n",
+    "output: ChatModelOutput = await model.create(ChatModelInput(messages=[UserMessage(content=prompt)]))\n",
+    "\n",
+    "print(output.get_text_content())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Structured Outputs\n",
+    "\n",
+    "Sometimes (often!) you will want LLM output in a specific format. This allows you to interface the LLM with your code in a reliable manner, e.g. when you want the LLM to produce the input to a function or tool. To achieve this, you can use structured output.\n",
+    "\n",
+    "In the example below we ask Granite to generate a character using a very specific format."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import Literal\n",
+    "\n",
+    "from pydantic import Field\n",
+    "\n",
+    "\n",
+    "class CharacterSchema(BaseModel):\n",
+    "    name: str = Field(description=\"The name of the character.\")\n",
+    "    occupation: str = Field(description=\"The occupation of the character.\")\n",
+    "    species: Literal[\"Human\", \"Insectoid\", \"Void-Serpent\", \"Synth\", \"Ethereal\", \"Liquid-Metal\"] = Field(\n",
+    "        description=\"The species of the character.\"\n",
+    "    )\n",
+    "    back_story: str = Field(description=\"Brief backstory of this character.\")\n",
+    "\n",
+    "\n",
+    "user_message = UserMessage(\n",
+    "    \"Create a fantasy sci-fi character for my new game. This character will be the main protagonist.\"\n",
+    ")\n",
+    "response = await model.create_structure(\n",
+    "    {\n",
+    "        \"schema\": CharacterSchema,\n",
+    "        \"messages\": [user_message],\n",
+    "    }\n",
+    ")\n",
+    "\n",
+    "print(response.object)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## System Prompts\n",
+    "\n",
+    "The system prompt, or SystemMessage, is a special message type that can influence the general behavior of an LLM. If you would like to steer the LLM's overall behavior, you can include a SystemMessage.\n",
+    "\n",
+    "In the example below we add a system message that influences the LLM to speak like a pirate!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from beeai_framework.backend.message import SystemMessage\n",
+    "\n",
+    "system_message = SystemMessage(content=\"You are a pirate. You always respond using pirate slang.\")\n",
+    "user_message = UserMessage(content=\"What is a baby hedgehog called?\")\n",
+    "output: ChatModelOutput = await model.create(ChatModelInput(messages=[system_message, user_message]))\n",
+    "\n",
+    "print(output.get_text_content())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Building an Agent\n",
+    "\n",
+    "You are now ready to build your first agent. Move on to [workflows.ipynb](workflows.ipynb).\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "beeai-iRW9JlkS-py3.12",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/python/examples/notebooks/requirements.txt b/python/examples/notebooks/requirements.txt
new file mode 100644
index 00000000..d919f6f3
--- /dev/null
+++ b/python/examples/notebooks/requirements.txt
@@ -0,0 +1 @@
+ipykernel
diff --git a/python/examples/notebooks/searXNG.md b/python/examples/notebooks/searXNG.md
new file mode 100644
index 00000000..0802ebbb
--- /dev/null
+++ b/python/examples/notebooks/searXNG.md
@@ -0,0 +1,52 @@
+# 🔍 SearXNG Setup Instructions
+
+## Description
+
+SearXNG is a metasearch engine that aggregates results from multiple search engines. Because SearXNG does not require an API key, you can run it directly on your laptop to give your agent easy access to web search functionality.
+
+## Setup
+
+Follow these steps to create a private SearXNG instance. For more advanced usage, see the [SearXNG project documentation](https://github.com/searxng/searxng).
+
+### 1. Create a local folder for the SearXNG configuration files.
+
+The files will be automatically written to this location, but you will need to make a minor modification.
+
+```
+mkdir ~/searxng
+```
+
+### 2. Run the SearXNG Docker container.
+
+```
+docker run -d --name searxng -p 8888:8080 -v ~/searxng:/etc/searxng --restart always searxng/searxng:latest
+```
+
+### 3. Edit the configuration files and restart the container.
+
+When you first run a SearXNG Docker container, it will write configuration files to the `~/searxng` folder.
+
+```
+settings.yml
+uwsgi.ini
+```
+
+Open `~/searxng/settings.yml`, find the `formats:` list and add `json`.
+
+```yaml
+search:
+  formats:
+    - html
+    - json
+```
+
+Stop and remove the container, then run it again so the configuration changes take effect.
+
+```
+docker stop searxng
+docker rm searxng
+docker run -d --name searxng -p 8888:8080 -v ~/searxng:/etc/searxng --restart always searxng/searxng:latest
+```
+
+### 4. Check the installation
+
+Navigate to `http://localhost:8888` and you should see a SearXNG interface.
diff --git a/python/examples/notebooks/workflows.ipynb b/python/examples/notebooks/workflows.ipynb
new file mode 100644
index 00000000..a86f25cc
--- /dev/null
+++ b/python/examples/notebooks/workflows.ipynb
@@ -0,0 +1,402 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# BeeAI Workflows\n",
+    "\n",
+    "In the previous notebook you learned the basics of the BeeAI framework, such as PromptTemplates, Messages, Memory, Model Backends, and various forms of output generation (freeform & structured). In this notebook we will focus on Workflows.\n",
+    "\n",
+    "Workflows allow you to combine what you have already learned to develop an AI agent. The behavior of the agent is defined via workflow steps and transitions between those steps. You can think of the Workflow as a graph that describes the behavior of an agent."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Basics of Workflows\n",
+    "\n",
+    "The main components of a BeeAI workflow are state, defined as a Pydantic model, and steps, which are defined using Python functions.\n",
+    "\n",
+    "You can think of State as structured memory that the workflow can read and write during execution.\n",
+    "\n",
+    "Steps are the functional components of the Workflow that connect together to perform the actions of the agent.\n",
+    "\n",
+    "The simple workflow example below exhibits the following key features:\n",
+    "\n",
+    "- The state definition contains a required message field.\n",
+    "- The step (my_first_step) is defined as a function parameterized with the state instance.\n",
+    "- The state can be modified in a step, and state changes are persisted between steps and workflow executions.\n",
+    "- The step function returns a string naming the next step (this is how step transitions are implemented).\n",
+    "- `Workflow.END` indicates the end of the workflow."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import traceback\n",
+    "import warnings\n",
+    "\n",
+    "from pydantic import BaseModel, ValidationError\n",
+    "\n",
+    "from beeai_framework.workflows.workflow import Workflow, WorkflowError\n",
+    "\n",
+    "warnings.simplefilter(\"ignore\", UserWarning)\n",
+    "\n",
+    "\n",
+    "# Define global state that is accessible to each step in the workflow graph\n",
+    "# The message field is required when instantiating the state object\n",
+    "class MessageState(BaseModel):\n",
+    "    message: str\n",
+    "\n",
+    "\n",
+    "# Each step in the workflow is defined as a Python function\n",
+    "async def my_first_step(state: MessageState) -> str:\n",
+    "    state.message += \" World\"  # Modify the state\n",
+    "    print(\"Running first step!\")\n",
+    "    return Workflow.END\n",
+    "\n",
+    "\n",
+    "try:\n",
+    "    # Define the structure of the workflow graph\n",
+    "    basic_workflow = Workflow(schema=MessageState, name=\"MyWorkflow\")\n",
+    "\n",
+    "    # Add a step, each step has a name and a function that implements the step\n",
+    "    basic_workflow.add_step(\"my_first_step\", my_first_step)\n",
+    "\n",
+    "    # Execute the workflow\n",
+    "    basic_response = await basic_workflow.run(MessageState(message=\"Hello\"))\n",
+    "\n",
+    "    print(\"State after workflow:\", basic_response.state.message)\n",
+    "\n",
+    "except WorkflowError:\n",
+    "    traceback.print_exc()\n",
+    "except ValidationError:\n",
+    "    traceback.print_exc()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## A multi-step workflow with tools\n",
+    "\n",
+    "You now know the basic components of a Workflow. To explore the power of BeeAI Workflows, we will now walk through the implementation of a simple web search agent built as a Workflow.\n",
+    "\n",
+    "This agent devises a search query based on an input question, runs the query to get search results, and then generates an answer to the question based on the retrieved search results.\n",
+    "\n",
+    "Let's start with some imports."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain_community.utilities import SearxSearchWrapper\n",
+    "from pydantic import Field\n",
+    "\n",
+    "from beeai_framework.backend.chat import ChatModel, ChatModelOutput, ChatModelStructureOutput\n",
+    "from beeai_framework.backend.message import UserMessage\n",
+    "from beeai_framework.utils.templates import PromptTemplate"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next we can define our workflow State.\n",
+    "\n",
+    "In this case we have a `question`, which is a required field when instantiating the State. The other fields, `search_results` and `answer`, are optional during construction (defaulting to `None`) but will be populated by the workflow steps during execution."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Workflow State\n",
+    "class SearchAgentState(BaseModel):\n",
+    "    question: str\n",
+    "    search_results: str | None = None\n",
+    "    answer: str | None = None"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next we define the ChatModel instance that we will use to interact with our LLM. We will use IBM Granite 3.1 8B via Ollama."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create a ChatModel to interface with granite3.1-dense:8b on a local ollama\n",
+    "model = await ChatModel.from_name(\"ollama:granite3.1-dense:8b\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This is a web search agent, so we need a way to run web searches. We will use the `SearxSearchWrapper` from the LangChain community tools project.\n",
+    "\n",
+    "To use the `SearxSearchWrapper` you will need to set up a local SearXNG service.\n",
+    "\n",
+    "Follow the instructions at [searXNG.md](searXNG.md) to configure a local SearXNG instance."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Web search tool\n",
+    "search_tool = SearxSearchWrapper(searx_host=\"http://127.0.0.1:8888\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the workflow we make extensive use of prompt templates and structured outputs.\n",
+    "\n",
+    "Here we define the various templates, input schemas, and structured output schemas that we will need to implement the agent."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# PromptTemplate Input Schemas\n",
+    "class QuestionInput(BaseModel):\n",
+    "    question: str\n",
+    "\n",
+    "\n",
+    "class SearchRAGInput(BaseModel):\n",
+    "    question: str\n",
+    "    search_results: str\n",
+    "\n",
+    "\n",
+    "# Prompt Templates\n",
+    "search_query_template = PromptTemplate(\n",
+    "    schema=QuestionInput,\n",
+    "    template=\"\"\"Convert the following question into a concise, effective web search query using keywords and operators for accuracy.\n",
+    "Question: {{question}}\"\"\",\n",
+    ")\n",
+    "\n",
+    "search_rag_template = PromptTemplate(\n",
+    "    schema=SearchRAGInput,\n",
+    "    template=\"\"\"Search results:\n",
+    "{{search_results}}\n",
+    "\n",
+    "Question: {{question}}\n",
+    "Provide a concise answer based on the search results provided. If the results are irrelevant or insufficient, say 'I don't know.' Avoid phrases such as 'According to the results...'.\"\"\",\n",
+    ")\n",
+    "\n",
+    "\n",
+    "# Structured output Schemas\n",
+    "class WebSearchQuery(BaseModel):\n",
+    "    query: str = Field(description=\"The web search query.\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we can define the first step of the workflow, named `web_search`.\n",
+    "\n",
+    "This step prompts the LLM to generate an effective search query using the `search_query_template`.\n",
+    "\n",
+    "The search query is then used to run a web search using the search tool. The search results are stored in the `search_results` field in the workflow state.\n",
+    "\n",
+    "The step then returns the string `generate_answer`, which passes control to the step named `generate_answer`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "async def web_search(state: SearchAgentState) -> str:\n",
+    "    print(\"Step: \", \"web_search\")\n",
+    "    # Generate a search query\n",
+    "    prompt = search_query_template.render(QuestionInput(question=state.question))\n",
+    "    response: ChatModelStructureOutput = await model.create_structure(\n",
+    "        {\n",
+    "            \"schema\": WebSearchQuery,\n",
+    "            \"messages\": [UserMessage(prompt)],\n",
+    "        }\n",
+    "    )\n",
+    "    # Run search and store results in state\n",
+    "    state.search_results = search_tool.run(response.object[\"query\"])\n",
+    "    return \"generate_answer\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The next step in the Workflow is `generate_answer`. This step takes the `question` and the `search_results` from the workflow state and uses the `search_rag_template` to generate an answer.\n",
+    "\n",
+    "The answer is stored in the state, and the workflow ends by returning `Workflow.END`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "async def generate_answer(state: SearchAgentState) -> str:\n",
+    "    print(\"Step: \", \"generate_answer\")\n",
+    "    # Generate answer based on question and search results from previous step.\n",
+    "    prompt = search_rag_template.render(\n",
+    "        SearchRAGInput(question=state.question, search_results=state.search_results or \"No results available.\")\n",
+    "    )\n",
+    "    output: ChatModelOutput = await model.create({\"messages\": [UserMessage(prompt)]})\n",
+    "\n",
+    "    # Store answer in state\n",
+    "    state.answer = output.get_text_content()\n",
+    "    return Workflow.END"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, we define the overall workflow and add the steps we developed earlier."
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "try:\n", + " # Define the structure of the workflow graph\n", + " search_agent_workflow = Workflow(schema=SearchAgentState, name=\"WebSearchAgent\")\n", + " search_agent_workflow.add_step(\"web_search\", web_search)\n", + " search_agent_workflow.add_step(\"generate_answer\", generate_answer)\n", + "\n", + " # Execute the workflow\n", + " search_response = await search_agent_workflow.run(\n", + " SearchAgentState(question=\"What is the term for a baby hedgehog?\")\n", + " )\n", + "\n", + " print(\"*****\")\n", + " print(\"Question: \", search_response.state.question)\n", + " print(\"Answer: \", search_response.state.answer)\n", + "\n", + "except WorkflowError:\n", + " traceback.print_exc()\n", + "except ValidationError:\n", + " traceback.print_exc()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Adding Memory to a Workflow Agent\n", + "\n", + "The web search agent from the previous example can answer questions but is unable to converse because it does not maintain message history. \n", + "\n", + "In the next example we show you how to add memory to your agent, so you can chat interactively." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Workflow State\n", + "from pydantic import InstanceOf\n", + "\n", + "from beeai_framework.backend.message import AssistantMessage, SystemMessage\n", + "from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory\n", + "\n", + "\n", + "class ChatState(BaseModel):\n", + " memory: InstanceOf[UnconstrainedMemory]\n", + " output: str | None = None\n", + "\n", + "\n", + "async def chat(state: ChatState) -> str:\n", + " output: ChatModelOutput = await model.create({\"messages\": state.memory.messages})\n", + " state.output = output.get_text_content()\n", + " return Workflow.END\n", + "\n", + "\n", + "memory = UnconstrainedMemory()\n", + "await memory.add(SystemMessage(content=\"You are a helpful and friendly AI assistant.\"))\n", + "\n", + "try:\n", + " # Define the structure of the workflow graph\n", + " chat_workflow = Workflow(ChatState)\n", + " chat_workflow.add_step(\"chat\", chat)\n", + " chat_workflow.add_step(\"generate_answer\", generate_answer)\n", + "\n", + " # Add user message to memory\n", + " await memory.add(UserMessage(content=input(\"User: \")))\n", + " # Run workflow with memory\n", + " response = await chat_workflow.run(ChatState(memory=memory))\n", + " # Add assistant response to memory\n", + " await memory.add(AssistantMessage(content=response.state.output))\n", + "\n", + " print(\"\\n\".join(f\"{m.role}: {m.text}\" for m in memory.messages))\n", + "\n", + "except WorkflowError:\n", + " traceback.print_exc()\n", + "except ValidationError:\n", + " traceback.print_exc()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ReAct Agents\n", + "\n", + "You are now familiar with Workflow based agents, next you can explore pre-canned ReAct agents. Move on to [agents.ipynb](agents.ipynb)." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "beeai-iRW9JlkS-py3.12", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/examples/requirements.txt b/python/examples/requirements.txt new file mode 100644 index 00000000..7f05330c --- /dev/null +++ b/python/examples/requirements.txt @@ -0,0 +1,2 @@ +langchain-community +langchain diff --git a/python/examples/runners/basic_runner.py b/python/examples/runners/basic_runner.py new file mode 100644 index 00000000..28a38a4f --- /dev/null +++ b/python/examples/runners/basic_runner.py @@ -0,0 +1,86 @@ +import asyncio +from dataclasses import asdict + +from beeai_framework.agents.runners.base import ( + BeeRunnerToolInput, + BeeRunnerToolResult, + RunnerIteration, +) +from beeai_framework.agents.runners.default.prompts import ( + AssistantPromptTemplate, + AssistantPromptTemplateInput, +) +from beeai_framework.agents.runners.default.runner import DefaultRunner +from beeai_framework.agents.types import ( + BeeAgentExecutionConfig, + BeeInput, + BeeIterationResult, + BeeMeta, + BeeRunInput, + BeeRunOptions, +) +from beeai_framework.backend.message import AssistantMessage +from beeai_framework.llms.llm import LLM +from beeai_framework.memory import TokenMemory +from beeai_framework.tools.weather.openmeteo import OpenMeteoTool + + +# Main async function for testing +async def main() -> None: + llm = LLM( + model="llama3.1", # Use llama3.1 for better performance + parameters={ + "temperature": 0, + "repeat_penalty": 1.0, + "num_predict": 2048, + }, + ) + + input = BeeInput( + llm=llm, + tools=[OpenMeteoTool()], + memory=TokenMemory(llm), + execution=BeeAgentExecutionConfig(max_iterations=10, max_retries_per_step=3, total_max_retries=10), + ) + + meta = BeeMeta(iteration=0) + + runner = DefaultRunner( + input=input, options=BeeRunOptions(execution=input.execution, signal=None), run=None + ) # TODO Figure out run + + await runner.init(BeeRunInput("What is the current weather in White Plains?")) + + final_answer: str | None = None + + while final_answer is None: + iteration: RunnerIteration = await runner.create_iteration() + + # Run tool if the iteration state includes tool call + if iteration.state.tool_name and iteration.state.tool_input: + tool_result: BeeRunnerToolResult = await runner.tool( + input=BeeRunnerToolInput( + state=BeeIterationResult( + tool_name=iteration.state.tool_name, tool_input=iteration.state.tool_input + ), + emitter=None, + meta=meta, + signal=None, + ) + ) + + iteration.state.tool_output = tool_result.output.get_text_content() + assistant_prompt = AssistantPromptTemplate.render(AssistantPromptTemplateInput(**asdict(iteration.state))) + print(assistant_prompt) + await runner.memory.add(AssistantMessage(content=assistant_prompt)) + + elif iteration.state.final_answer: + assistant_prompt = AssistantPromptTemplate.render(AssistantPromptTemplateInput(**asdict(iteration.state))) + print(assistant_prompt) + final_answer = iteration.state.final_answer + + print(final_answer) + + +if __name__ == "__main__": + asyncio.run(main()) # Runs the main coroutine diff --git a/python/examples/templates/agent_sys_prompt.py b/python/examples/templates/agent_sys_prompt.py new file mode 100644 index 00000000..b281add1 --- 
/dev/null
+++ b/python/examples/templates/agent_sys_prompt.py
@@ -0,0 +1,17 @@
+from beeai_framework.agents.runners.default.prompts import (
+    SystemPromptTemplate,
+    SystemPromptTemplateInput,
+    ToolDefinition,
+)
+from beeai_framework.tools.weather.openmeteo import OpenMeteoTool
+
+tool = OpenMeteoTool()
+
+# Render the system prompt
+prompt = SystemPromptTemplate.render(
+    SystemPromptTemplateInput(
+        instructions="You are a helpful AI assistant!", tools=[ToolDefinition(**tool.prompt_data())], tools_length=1
+    )
+)
+
+print(prompt)
diff --git a/python/examples/templates/basic_functions.py b/python/examples/templates/basic_functions.py
new file mode 100644
index 00000000..121fdebd
--- /dev/null
+++ b/python/examples/templates/basic_functions.py
@@ -0,0 +1,26 @@
+import os
+from datetime import datetime
+from zoneinfo import ZoneInfo
+
+from pydantic import BaseModel
+
+from beeai_framework.utils.templates import PromptTemplate
+
+os.environ["USER"] = "BeeAI"
+
+
+class UserQuery(BaseModel):
+    query: str
+
+
+template = PromptTemplate(
+    schema=UserQuery,
+    functions={
+        "format_date": lambda: datetime.now(ZoneInfo("US/Eastern")).strftime("%A, %B %d, %Y at %I:%M:%S %p"),
+        "current_user": lambda: os.environ["USER"],
+    },
+    template="""
+{{format_date}}
+{{current_user}}: {{query}}
+""",
+)
diff --git a/python/examples/templates/basic_template.py b/python/examples/templates/basic_template.py
new file mode 100644
index 00000000..51ab1356
--- /dev/null
+++ b/python/examples/templates/basic_template.py
@@ -0,0 +1,18 @@
+from pydantic import BaseModel
+
+from beeai_framework.utils.templates import PromptTemplate
+
+
+class UserMessage(BaseModel):
+    label: str
+    input: str
+
+
+template = PromptTemplate(
+    schema=UserMessage,
+    template="""{{label}}: {{input}}""",
+)
+
+prompt = template.render(UserMessage(label="Query", input="What interesting things happened on this day in history?"))
+
+print(prompt)
diff --git a/python/examples/tools/decorator.py b/python/examples/tools/decorator.py
new file mode 100644
index 00000000..f0c8b8f2
--- /dev/null
+++ b/python/examples/tools/decorator.py
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+import json
+from urllib.parse import quote
+
+import requests
+from beeai import BeeAgent, tool
+
+from beeai_framework.agents.types import BeeInput, BeeRunInput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory
+from beeai_framework.tools.tool import StringToolOutput
+from beeai_framework.utils import BeeLogger
+
+logger = BeeLogger(__name__)
+
+
+# defining a tool using the `tool` decorator
+@tool
+def basic_calculator(expression: str) -> StringToolOutput:
+    """
+    A calculator tool that performs mathematical operations.
+
+    Args:
+        expression: The mathematical expression to evaluate (e.g., "2 + 3 * 4").
+
+    Returns:
+        The result of the mathematical expression
+    """
+    try:
+        encoded_expression = quote(expression)
+        math_url = f"https://newton.vercel.app/api/v2/simplify/{encoded_expression}"
+
+        response = requests.get(
+            math_url,
+            headers={"Accept": "application/json"},
+        )
+        response.raise_for_status()
+
+        return StringToolOutput(json.dumps(response.json()))
+    except Exception as e:
+        raise RuntimeError(f"Error evaluating expression: {e!s}") from e
+
+
+async def main() -> None:
+    # using the tool in an agent
+
+    chat_model = await ChatModel.from_name("ollama:granite3.1-dense:8b")
+
+    agent = BeeAgent(BeeInput(llm=chat_model, tools=[basic_calculator], memory=UnconstrainedMemory()))
+
+    result = await agent.run(BeeRunInput(prompt="What is the square root of 36?"))
+
+    print(result.result.text)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/examples/tools/duckduckgo.py b/python/examples/tools/duckduckgo.py
new file mode 100644
index 00000000..f9497e69
--- /dev/null
+++ b/python/examples/tools/duckduckgo.py
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from beeai_framework.agents.bee import BeeAgent
+from beeai_framework.agents.types import BeeInput, BeeRunInput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.memory import UnconstrainedMemory
+from beeai_framework.tools.search.duckduckgo import DuckDuckGoSearchTool
+
+
+async def main() -> None:
+    chat_model = await ChatModel.from_name("ollama:granite3.1-dense:8b")
+    agent = BeeAgent(BeeInput(llm=chat_model, tools=[DuckDuckGoSearchTool()], memory=UnconstrainedMemory()))
+
+    result = await agent.run(BeeRunInput(prompt="How tall is Mount Everest?"))
+
+    print(result.result.text)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/examples/tools/openmeteo.py b/python/examples/tools/openmeteo.py
new file mode 100644
index 00000000..1ce625ea
--- /dev/null
+++ b/python/examples/tools/openmeteo.py
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+
+from beeai_framework.agents.bee import BeeAgent
+from beeai_framework.agents.types import BeeInput, BeeRunInput
+from beeai_framework.backend.chat import ChatModel
+from beeai_framework.memory import UnconstrainedMemory
+from beeai_framework.tools.weather.openmeteo import OpenMeteoTool
+
+
+async def main() -> None:
+    llm = await ChatModel.from_name("ollama:granite3.1-dense:8b")
+    agent = BeeAgent(BeeInput(llm=llm, tools=[OpenMeteoTool()], memory=UnconstrainedMemory()))
+
+    result = await agent.run(BeeRunInput(prompt="What's the current weather in Las Vegas?"))
+
+    print(result.result.text)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/examples/version.py b/python/examples/version.py
new file mode 100644
index 00000000..46409041
--- /dev/null
+++ b/python/examples/version.py
@@ -0,0 +1 @@
+# TODO
diff --git a/python/examples/workflows/advanced.py b/python/examples/workflows/advanced.py
new file mode 100644
index 00000000..60bfb1a2
--- /dev/null
+++ b/python/examples/workflows/advanced.py
@@ -0,0 +1,61 @@
+import asyncio
+from typing import Literal, TypeAlias
+
+from pydantic import BaseModel, ValidationError
+
+from beeai_framework.workflows.workflow import Workflow, WorkflowError, WorkflowReservedStepName
+
+
+async def main() -> None:
+    # State
+    class State(BaseModel):
+        x: int
+        y: int
+        abs_repetitions: int | None = None
+        result: int | None = None
+
+    WorkflowStep: TypeAlias = Literal["pre_process", "add_loop", "post_process"]
+
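+    # Each step returns the name of the next step to run: Workflow.SELF repeats
+    # the current step (add_loop uses this to iterate), and Workflow.END finishes
+    # the run. Control flows pre_process -> add_loop (looped) -> post_process.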
+ def pre_process(state: State) -> WorkflowStep: + print("pre_process") + state.abs_repetitions = abs(state.y) + return "add_loop" + + def add_loop(state: State) -> WorkflowStep | WorkflowReservedStepName: + if state.abs_repetitions and state.abs_repetitions > 0: + result = (state.result if state.result is not None else 0) + state.x + abs_repetitions = (state.abs_repetitions if state.abs_repetitions is not None else 0) - 1 + print(f"add_loop: intermediate result {result}") + state.abs_repetitions = abs_repetitions + state.result = result + return Workflow.SELF + else: + return "post_process" + + def post_process(state: State) -> WorkflowReservedStepName: + print("post_process") + if state.y < 0: + result = -(state.result if state.result is not None else 0) + state.result = result + return Workflow.END + + try: + multiplication_workflow = Workflow[State, WorkflowStep](name="MultiplicationWorkflow", schema=State) + multiplication_workflow.add_step("pre_process", pre_process) + multiplication_workflow.add_step("add_loop", add_loop) + multiplication_workflow.add_step("post_process", post_process) + + response = await multiplication_workflow.run(State(x=8, y=5)) + print(f"result: {response.state.result}") + + response = await multiplication_workflow.run(State(x=8, y=-5)) + print(f"result: {response.state.result}") + + except WorkflowError as e: + print(e) + except ValidationError as e: + print(e) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/examples/workflows/memory.py b/python/examples/workflows/memory.py new file mode 100644 index 00000000..a752196d --- /dev/null +++ b/python/examples/workflows/memory.py @@ -0,0 +1,44 @@ +import asyncio +import traceback + +from pydantic import BaseModel, InstanceOf, ValidationError + +from beeai_framework.backend.message import AssistantMessage, UserMessage +from beeai_framework.memory.unconstrained_memory import UnconstrainedMemory +from beeai_framework.workflows.workflow import Workflow, WorkflowError + + +async def main() -> None: + # State with memory + class State(BaseModel): + memory: InstanceOf[UnconstrainedMemory] + output: str | None = None + + async def echo(state: State) -> str: + # Get the last message in memory + last_message = state.memory.messages[-1] + state.output = last_message.text[::-1] + return Workflow.END + + try: + memory = UnconstrainedMemory() + workflow = Workflow(State) + workflow.add_step("echo", echo) + + while True: + # Add user message to memory + await memory.add(UserMessage(content=input("User: "))) + # Run workflow with memory + response = await workflow.run(State(memory=memory)) + # Add assistant response to memory + await memory.add(AssistantMessage(content=response.state.output)) + + print("Assistant: ", response.state.output) + except WorkflowError: + traceback.print_exc() + except ValidationError: + traceback.print_exc() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/examples/workflows/multi_agents.py b/python/examples/workflows/multi_agents.py new file mode 100644 index 00000000..c2377cf7 --- /dev/null +++ b/python/examples/workflows/multi_agents.py @@ -0,0 +1,60 @@ +import asyncio +import traceback + +from pydantic import ValidationError + +from beeai_framework.agents.bee.agent import BeeAgentExecutionConfig +from beeai_framework.backend.chat import ChatModel +from beeai_framework.backend.message import UserMessage +from beeai_framework.memory import UnconstrainedMemory +from beeai_framework.tools.search.duckduckgo import DuckDuckGoSearchTool +from 
beeai_framework.tools.weather.openmeteo import OpenMeteoTool +from beeai_framework.workflows.agent import AgentFactoryInput, AgentWorkflow +from beeai_framework.workflows.workflow import WorkflowError + + +async def main() -> None: + llm = await ChatModel.from_name("ollama:granite3.1-dense:8b") + + try: + workflow = AgentWorkflow(name="Smart assistant") + workflow.add_agent( + agent=AgentFactoryInput( + name="WeatherForecaster", + instructions="You are a weather assistant. Respond only if you can provide a useful answer.", + tools=[OpenMeteoTool()], + llm=llm, + execution=BeeAgentExecutionConfig(max_iterations=3), + ) + ) + workflow.add_agent( + agent=AgentFactoryInput( + name="Researcher", + instructions="You are a researcher assistant. Respond only if you can provide a useful answer.", + tools=[DuckDuckGoSearchTool()], + llm=llm, + ) + ) + workflow.add_agent( + agent=AgentFactoryInput( + name="Solver", + instructions="""Your task is to provide the most useful final answer based on the assistants' +responses which all are relevant. Ignore those where assistant do not know.""", + llm=llm, + ) + ) + + prompt = "What is the weather in New York?" + memory = UnconstrainedMemory() + await memory.add(UserMessage(content=prompt)) + response = await workflow.run(messages=memory.messages) + print(f"result: {response.state.final_answer}") + + except WorkflowError: + traceback.print_exc() + except ValidationError: + traceback.print_exc() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/examples/workflows/simple.py b/python/examples/workflows/simple.py new file mode 100644 index 00000000..82376c2d --- /dev/null +++ b/python/examples/workflows/simple.py @@ -0,0 +1,29 @@ +import asyncio +import traceback + +from pydantic import BaseModel, ValidationError + +from beeai_framework.workflows.workflow import Workflow, WorkflowError + + +async def main() -> None: + # State + class State(BaseModel): + input: str + + try: + workflow = Workflow(State) + workflow.add_step("first", lambda state: print("Running first step!")) + workflow.add_step("second", lambda state: print("Running second step!")) + workflow.add_step("third", lambda state: print("Running third step!")) + + await workflow.run(State(input="Hello")) + + except WorkflowError: + traceback.print_exc() + except ValidationError: + traceback.print_exc() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/examples/workflows/web_agent.py b/python/examples/workflows/web_agent.py new file mode 100644 index 00000000..b739d54d --- /dev/null +++ b/python/examples/workflows/web_agent.py @@ -0,0 +1,92 @@ +import asyncio +import sys +import traceback + +from langchain_community.utilities import SearxSearchWrapper +from pydantic import BaseModel, Field, ValidationError + +from beeai_framework.adapters.ollama.backend.chat import OllamaChatModel +from beeai_framework.backend.chat import ChatModelOutput, ChatModelStructureOutput +from beeai_framework.backend.message import UserMessage +from beeai_framework.utils.templates import PromptTemplate +from beeai_framework.workflows.workflow import Workflow, WorkflowError + + +async def main() -> None: + llm = OllamaChatModel("granite3.1-dense:8b") + search = SearxSearchWrapper(searx_host="http://127.0.0.1:8888") + + class State(BaseModel): + input: str + search_results: str | None = None + output: str | None = None + + class InputSchema(BaseModel): + input: str + + class WebSearchQuery(BaseModel): + search_query: str = Field(description="Search query.") + + class 
RAGSchema(InputSchema): + input: str + search_results: str + + async def web_search(state: State) -> str: + print("Step: ", sys._getframe().f_code.co_name) + prompt = PromptTemplate( + schema=InputSchema, + template=""" + Please create a web search query for the following input. + Query: {{input}}""", + ).render(InputSchema(input=state.input)) + + output: ChatModelStructureOutput = await llm.create_structure( + { + "schema": WebSearchQuery, + "messages": [UserMessage(prompt)], + } + ) + # TODO Why is object not of type schema T? + state.search_results = search.run(f"current weather in {output.object['search_query']}") + return Workflow.NEXT + + async def generate_output(state: State) -> str: + print("Step: ", sys._getframe().f_code.co_name) + + prompt = PromptTemplate( + schema=RAGSchema, + template=""" + Use the following search results to answer the query accurately. If the results are irrelevant or insufficient, say 'I don't know.' + + Search Results: + {{search_results}} + + Query: {{input}} + """, # noqa: E501 + ).render(RAGSchema(input=state.input, search_results=state.search_results or "No results available.")) + + output: ChatModelOutput = await llm.create({"messages": [UserMessage(prompt)]}) + state.output = output.get_text_content() + return Workflow.END + + try: + # Define the structure of the workflow graph + workflow = Workflow(State) + workflow.add_step("web_search", web_search) + workflow.add_step("generate_output", generate_output) + + # Execute the workflow + result = await workflow.run(State(input="What is the demon core?")) + + print("\n*********************") + print("Input: ", result.state.input) + print("Agent: ", result.state.output) + + except WorkflowError: + traceback.print_exc() + except ValidationError: + traceback.print_exc() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/poetry.lock b/python/poetry.lock new file mode 100644 index 00000000..c5940ea0 --- /dev/null +++ b/python/poetry.lock @@ -0,0 +1,3295 @@ +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. + +[[package]] +name = "aiofiles" +version = "24.1.0" +description = "File support for asyncio." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"}, + {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"}, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.4.6" +description = "Happy Eyeballs for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiohappyeyeballs-2.4.6-py3-none-any.whl", hash = "sha256:147ec992cf873d74f5062644332c539fcd42956dc69453fe5204195e560517e1"}, + {file = "aiohappyeyeballs-2.4.6.tar.gz", hash = "sha256:9b05052f9042985d32ecbe4b59a77ae19c006a78f1344d7fdad69d28ded3d0b0"}, +] + +[[package]] +name = "aiohttp" +version = "3.11.12" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "aiohttp-3.11.12-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:aa8a8caca81c0a3e765f19c6953416c58e2f4cc1b84829af01dd1c771bb2f91f"}, + {file = "aiohttp-3.11.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:84ede78acde96ca57f6cf8ccb8a13fbaf569f6011b9a52f870c662d4dc8cd854"}, + {file = "aiohttp-3.11.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:584096938a001378484aa4ee54e05dc79c7b9dd933e271c744a97b3b6f644957"}, + {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:392432a2dde22b86f70dd4a0e9671a349446c93965f261dbaecfaf28813e5c42"}, + {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:88d385b8e7f3a870146bf5ea31786ef7463e99eb59e31db56e2315535d811f55"}, + {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b10a47e5390c4b30a0d58ee12581003be52eedd506862ab7f97da7a66805befb"}, + {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b5263dcede17b6b0c41ef0c3ccce847d82a7da98709e75cf7efde3e9e3b5cae"}, + {file = "aiohttp-3.11.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50c5c7b8aa5443304c55c262c5693b108c35a3b61ef961f1e782dd52a2f559c7"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d1c031a7572f62f66f1257db37ddab4cb98bfaf9b9434a3b4840bf3560f5e788"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:7e44eba534381dd2687be50cbd5f2daded21575242ecfdaf86bbeecbc38dae8e"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:145a73850926018ec1681e734cedcf2716d6a8697d90da11284043b745c286d5"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:2c311e2f63e42c1bf86361d11e2c4a59f25d9e7aabdbdf53dc38b885c5435cdb"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ea756b5a7bac046d202a9a3889b9a92219f885481d78cd318db85b15cc0b7bcf"}, + {file = "aiohttp-3.11.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:526c900397f3bbc2db9cb360ce9c35134c908961cdd0ac25b1ae6ffcaa2507ff"}, + {file = "aiohttp-3.11.12-cp310-cp310-win32.whl", hash = "sha256:b8d3bb96c147b39c02d3db086899679f31958c5d81c494ef0fc9ef5bb1359b3d"}, + {file = "aiohttp-3.11.12-cp310-cp310-win_amd64.whl", hash = "sha256:7fe3d65279bfbee8de0fb4f8c17fc4e893eed2dba21b2f680e930cc2b09075c5"}, + {file = "aiohttp-3.11.12-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:87a2e00bf17da098d90d4145375f1d985a81605267e7f9377ff94e55c5d769eb"}, + {file = "aiohttp-3.11.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b34508f1cd928ce915ed09682d11307ba4b37d0708d1f28e5774c07a7674cac9"}, + {file = "aiohttp-3.11.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:936d8a4f0f7081327014742cd51d320296b56aa6d324461a13724ab05f4b2933"}, + {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de1378f72def7dfb5dbd73d86c19eda0ea7b0a6873910cc37d57e80f10d64e1"}, + {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9d45dbb3aaec05cf01525ee1a7ac72de46a8c425cb75c003acd29f76b1ffe94"}, + {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:930ffa1925393381e1e0a9b82137fa7b34c92a019b521cf9f41263976666a0d6"}, + {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8340def6737118f5429a5df4e88f440746b791f8f1c4ce4ad8a595f42c980bd5"}, + {file = "aiohttp-3.11.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4016e383f91f2814e48ed61e6bda7d24c4d7f2402c75dd28f7e1027ae44ea204"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3c0600bcc1adfaaac321422d615939ef300df81e165f6522ad096b73439c0f58"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:0450ada317a65383b7cce9576096150fdb97396dcfe559109b403c7242faffef"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:850ff6155371fd802a280f8d369d4e15d69434651b844bde566ce97ee2277420"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8fd12d0f989c6099e7b0f30dc6e0d1e05499f3337461f0b2b0dadea6c64b89df"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:76719dd521c20a58a6c256d058547b3a9595d1d885b830013366e27011ffe804"}, + {file = "aiohttp-3.11.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:97fe431f2ed646a3b56142fc81d238abcbaff08548d6912acb0b19a0cadc146b"}, + {file = "aiohttp-3.11.12-cp311-cp311-win32.whl", hash = "sha256:e10c440d142fa8b32cfdb194caf60ceeceb3e49807072e0dc3a8887ea80e8c16"}, + {file = "aiohttp-3.11.12-cp311-cp311-win_amd64.whl", hash = "sha256:246067ba0cf5560cf42e775069c5d80a8989d14a7ded21af529a4e10e3e0f0e6"}, + {file = "aiohttp-3.11.12-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e392804a38353900c3fd8b7cacbea5132888f7129f8e241915e90b85f00e3250"}, + {file = "aiohttp-3.11.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8fa1510b96c08aaad49303ab11f8803787c99222288f310a62f493faf883ede1"}, + {file = "aiohttp-3.11.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:dc065a4285307607df3f3686363e7f8bdd0d8ab35f12226362a847731516e42c"}, + {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddb31f8474695cd61fc9455c644fc1606c164b93bff2490390d90464b4655df"}, + {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9dec0000d2d8621d8015c293e24589d46fa218637d820894cb7356c77eca3259"}, + {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3552fe98e90fdf5918c04769f338a87fa4f00f3b28830ea9b78b1bdc6140e0d"}, + {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dfe7f984f28a8ae94ff3a7953cd9678550dbd2a1f9bda5dd9c5ae627744c78e"}, 
+ {file = "aiohttp-3.11.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a481a574af914b6e84624412666cbfbe531a05667ca197804ecc19c97b8ab1b0"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1987770fb4887560363b0e1a9b75aa303e447433c41284d3af2840a2f226d6e0"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:a4ac6a0f0f6402854adca4e3259a623f5c82ec3f0c049374133bcb243132baf9"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c96a43822f1f9f69cc5c3706af33239489a6294be486a0447fb71380070d4d5f"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a5e69046f83c0d3cb8f0d5bd9b8838271b1bc898e01562a04398e160953e8eb9"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:68d54234c8d76d8ef74744f9f9fc6324f1508129e23da8883771cdbb5818cbef"}, + {file = "aiohttp-3.11.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c9fd9dcf9c91affe71654ef77426f5cf8489305e1c66ed4816f5a21874b094b9"}, + {file = "aiohttp-3.11.12-cp312-cp312-win32.whl", hash = "sha256:0ed49efcd0dc1611378beadbd97beb5d9ca8fe48579fc04a6ed0844072261b6a"}, + {file = "aiohttp-3.11.12-cp312-cp312-win_amd64.whl", hash = "sha256:54775858c7f2f214476773ce785a19ee81d1294a6bedc5cc17225355aab74802"}, + {file = "aiohttp-3.11.12-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:413ad794dccb19453e2b97c2375f2ca3cdf34dc50d18cc2693bd5aed7d16f4b9"}, + {file = "aiohttp-3.11.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4a93d28ed4b4b39e6f46fd240896c29b686b75e39cc6992692e3922ff6982b4c"}, + {file = "aiohttp-3.11.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d589264dbba3b16e8951b6f145d1e6b883094075283dafcab4cdd564a9e353a0"}, + {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5148ca8955affdfeb864aca158ecae11030e952b25b3ae15d4e2b5ba299bad2"}, + {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:525410e0790aab036492eeea913858989c4cb070ff373ec3bc322d700bdf47c1"}, + {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bd8695be2c80b665ae3f05cb584093a1e59c35ecb7d794d1edd96e8cc9201d7"}, + {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0203433121484b32646a5f5ea93ae86f3d9559d7243f07e8c0eab5ff8e3f70e"}, + {file = "aiohttp-3.11.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40cd36749a1035c34ba8d8aaf221b91ca3d111532e5ccb5fa8c3703ab1b967ed"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a7442662afebbf7b4c6d28cb7aab9e9ce3a5df055fc4116cc7228192ad6cb484"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:8a2fb742ef378284a50766e985804bd6adb5adb5aa781100b09befdbfa757b65"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2cee3b117a8d13ab98b38d5b6bdcd040cfb4181068d05ce0c474ec9db5f3c5bb"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f6a19bcab7fbd8f8649d6595624856635159a6527861b9cdc3447af288a00c00"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e4cecdb52aaa9994fbed6b81d4568427b6002f0a91c322697a4bfcc2b2363f5a"}, + {file = "aiohttp-3.11.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:30f546358dfa0953db92ba620101fefc81574f87b2346556b90b5f3ef16e55ce"}, + {file = "aiohttp-3.11.12-cp313-cp313-win32.whl", hash = "sha256:ce1bb21fc7d753b5f8a5d5a4bae99566386b15e716ebdb410154c16c91494d7f"}, + {file = "aiohttp-3.11.12-cp313-cp313-win_amd64.whl", hash = "sha256:f7914ab70d2ee8ab91c13e5402122edbc77821c66d2758abb53aabe87f013287"}, + {file = "aiohttp-3.11.12-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c3623053b85b4296cd3925eeb725e386644fd5bc67250b3bb08b0f144803e7b"}, + {file = "aiohttp-3.11.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:67453e603cea8e85ed566b2700efa1f6916aefbc0c9fcb2e86aaffc08ec38e78"}, + {file = "aiohttp-3.11.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6130459189e61baac5a88c10019b21e1f0c6d00ebc770e9ce269475650ff7f73"}, + {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9060addfa4ff753b09392efe41e6af06ea5dd257829199747b9f15bfad819460"}, + {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34245498eeb9ae54c687a07ad7f160053911b5745e186afe2d0c0f2898a1ab8a"}, + {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8dc0fba9a74b471c45ca1a3cb6e6913ebfae416678d90529d188886278e7f3f6"}, + {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a478aa11b328983c4444dacb947d4513cb371cd323f3845e53caeda6be5589d5"}, + {file = "aiohttp-3.11.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c160a04283c8c6f55b5bf6d4cad59bb9c5b9c9cd08903841b25f1f7109ef1259"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:edb69b9589324bdc40961cdf0657815df674f1743a8d5ad9ab56a99e4833cfdd"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4ee84c2a22a809c4f868153b178fe59e71423e1f3d6a8cd416134bb231fbf6d3"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:bf4480a5438f80e0f1539e15a7eb8b5f97a26fe087e9828e2c0ec2be119a9f72"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:e6b2732ef3bafc759f653a98881b5b9cdef0716d98f013d376ee8dfd7285abf1"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:f752e80606b132140883bb262a457c475d219d7163d996dc9072434ffb0784c4"}, + {file = "aiohttp-3.11.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ab3247d58b393bda5b1c8f31c9edece7162fc13265334217785518dd770792b8"}, + {file = "aiohttp-3.11.12-cp39-cp39-win32.whl", hash = "sha256:0d5176f310a7fe6f65608213cc74f4228e4f4ce9fd10bcb2bb6da8fc66991462"}, + {file = "aiohttp-3.11.12-cp39-cp39-win_amd64.whl", hash = "sha256:74bd573dde27e58c760d9ca8615c41a57e719bff315c9adb6f2a4281a28e8798"}, + {file = "aiohttp-3.11.12.tar.gz", hash = "sha256:7603ca26d75b1b86160ce1bbe2787a0b706e592af5b2504e12caa88a217767b0"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.3.0" +aiosignal = ">=1.1.2" +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +propcache = ">=0.2.0" +yarl = ">=1.17.0,<2.0" + +[package.extras] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.2.0) ; sys_platform == \"linux\" or sys_platform == \"darwin\"", "brotlicffi ; platform_python_implementation != \"CPython\""] + +[[package]] +name = "aiosignal" +version = "1.3.2" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.9" +groups = ["main"] 
+files = [ + {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, + {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.8.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a"}, + {file = "anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a"}, +] + +[package.dependencies] +idna = ">=2.8" +sniffio = ">=1.1" +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} + +[package.extras] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "argcomplete" +version = "3.5.3" +description = "Bash tab completion for argparse" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "argcomplete-3.5.3-py3-none-any.whl", hash = "sha256:2ab2c4a215c59fd6caaff41a869480a23e8f6a5f910b266c1808037f4e375b61"}, + {file = "argcomplete-3.5.3.tar.gz", hash = "sha256:c12bf50eded8aebb298c7b7da7a5ff3ee24dffd9f5281867dfe1424b58c55392"}, +] + +[package.extras] +test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] + +[[package]] +name = "astroid" +version = "3.3.8" +description = "An abstract syntax tree for Python with inference support." 
+optional = false +python-versions = ">=3.9.0" +groups = ["main"] +files = [ + {file = "astroid-3.3.8-py3-none-any.whl", hash = "sha256:187ccc0c248bfbba564826c26f070494f7bc964fd286b6d9fff4420e55de828c"}, + {file = "astroid-3.3.8.tar.gz", hash = "sha256:a88c7994f914a4ea8572fac479459f4955eeccc877be3f2d959a33273b0cf40b"}, +] + +[[package]] +name = "attrs" +version = "25.1.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, + {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, +] + +[package.extras] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] + +[[package]] +name = "beautifulsoup4" +version = "4.13.3" +description = "Screen-scraping library" +optional = false +python-versions = ">=3.7.0" +groups = ["main"] +files = [ + {file = "beautifulsoup4-4.13.3-py3-none-any.whl", hash = "sha256:99045d7d3f08f91f0d656bc9b7efbae189426cd913d830294a15eefa0ea4df16"}, + {file = "beautifulsoup4-4.13.3.tar.gz", hash = "sha256:1bd32405dacc920b42b83ba01644747ed77456a65760e285fbc47633ceddaf8b"}, +] + +[package.dependencies] +soupsieve = ">1.2" +typing-extensions = ">=4.0.0" + +[package.extras] +cchardet = ["cchardet"] +chardet = ["chardet"] +charset-normalizer = ["charset-normalizer"] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "brotli" +version = "1.1.0" +description = "Python bindings for the Brotli compression library" +optional = false +python-versions = "*" +groups = ["main"] +markers = "platform_python_implementation == \"CPython\"" +files = [ + {file = "Brotli-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:e1140c64812cb9b06c922e77f1c26a75ec5e3f0fb2bf92cc8c58720dec276752"}, + {file = "Brotli-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8fd5270e906eef71d4a8d19b7c6a43760c6abcfcc10c9101d14eb2357418de9"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ae56aca0402a0f9a3431cddda62ad71666ca9d4dc3a10a142b9dce2e3c0cda3"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43ce1b9935bfa1ede40028054d7f48b5469cd02733a365eec8a329ffd342915d"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7c4855522edb2e6ae7fdb58e07c3ba9111e7621a8956f481c68d5d979c93032e"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:38025d9f30cf4634f8309c6874ef871b841eb3c347e90b0851f63d1ded5212da"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e6a904cb26bfefc2f0a6f240bdf5233be78cd2488900a2f846f3c3ac8489ab80"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a37b8f0391212d29b3a91a799c8e4a2855e0576911cdfb2515487e30e322253d"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e84799f09591700a4154154cab9787452925578841a94321d5ee8fb9a9a328f0"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f66b5337fa213f1da0d9000bc8dc0cb5b896b726eefd9c6046f699b169c41b9e"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5dab0844f2cf82be357a0eb11a9087f70c5430b2c241493fc122bb6f2bb0917c"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e4fe605b917c70283db7dfe5ada75e04561479075761a0b3866c081d035b01c1"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1e9a65b5736232e7a7f91ff3d02277f11d339bf34099a56cdab6a8b3410a02b2"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:58d4b711689366d4a03ac7957ab8c28890415e267f9b6589969e74b6e42225ec"}, + {file = "Brotli-1.1.0-cp310-cp310-win32.whl", hash = "sha256:be36e3d172dc816333f33520154d708a2657ea63762ec16b62ece02ab5e4daf2"}, + {file = "Brotli-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0c6244521dda65ea562d5a69b9a26120769b7a9fb3db2fe9545935ed6735b128"}, + {file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a3daabb76a78f829cafc365531c972016e4aa8d5b4bf60660ad8ecee19df7ccc"}, + {file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c8146669223164fc87a7e3de9f81e9423c67a79d6b3447994dfb9c95da16e2d6"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30924eb4c57903d5a7526b08ef4a584acc22ab1ffa085faceb521521d2de32dd"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ceb64bbc6eac5a140ca649003756940f8d6a7c444a68af170b3187623b43bebf"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a469274ad18dc0e4d316eefa616d1d0c2ff9da369af19fa6f3daa4f09671fd61"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524f35912131cc2cabb00edfd8d573b07f2d9f21fa824bd3fb19725a9cf06327"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5b3cc074004d968722f51e550b41a27be656ec48f8afaeeb45ebf65b561481dd"}, + {file 
= "Brotli-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:19c116e796420b0cee3da1ccec3b764ed2952ccfcc298b55a10e5610ad7885f9"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:510b5b1bfbe20e1a7b3baf5fed9e9451873559a976c1a78eebaa3b86c57b4265"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a1fd8a29719ccce974d523580987b7f8229aeace506952fa9ce1d53a033873c8"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c247dd99d39e0338a604f8c2b3bc7061d5c2e9e2ac7ba9cc1be5a69cb6cd832f"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1b2c248cd517c222d89e74669a4adfa5577e06ab68771a529060cf5a156e9757"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2a24c50840d89ded6c9a8fdc7b6ed3692ed4e86f1c4a4a938e1e92def92933e0"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f31859074d57b4639318523d6ffdca586ace54271a73ad23ad021acd807eb14b"}, + {file = "Brotli-1.1.0-cp311-cp311-win32.whl", hash = "sha256:39da8adedf6942d76dc3e46653e52df937a3c4d6d18fdc94a7c29d263b1f5b50"}, + {file = "Brotli-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:aac0411d20e345dc0920bdec5548e438e999ff68d77564d5e9463a7ca9d3e7b1"}, + {file = "Brotli-1.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:32d95b80260d79926f5fab3c41701dbb818fde1c9da590e77e571eefd14abe28"}, + {file = "Brotli-1.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b760c65308ff1e462f65d69c12e4ae085cff3b332d894637f6273a12a482d09f"}, + {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409"}, + {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f4bf76817c14aa98cc6697ac02f3972cb8c3da93e9ef16b9c66573a68014f91"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0c5516f0aed654134a2fc936325cc2e642f8a0e096d075209672eb321cff408"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c3020404e0b5eefd7c9485ccf8393cfb75ec38ce75586e046573c9dc29967a0"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ed11165dd45ce798d99a136808a794a748d5dc38511303239d4e2363c0695dc"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:87a3044c3a35055527ac75e419dfa9f4f3667a1e887ee80360589eb8c90aabb9"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c5529b34c1c9d937168297f2c1fde7ebe9ebdd5e121297ff9c043bdb2ae3d6fb"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:ca63e1890ede90b2e4454f9a65135a4d387a4585ff8282bb72964fab893f2111"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e79e6520141d792237c70bcd7a3b122d00f2613769ae0cb61c52e89fd3443839"}, + {file = "Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0"}, + {file = "Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951"}, + {file = "Brotli-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8bf32b98b75c13ec7cf774164172683d6e7891088f6316e54425fde1efc276d5"}, + {file = "Brotli-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bc37c4d6b87fb1017ea28c9508b36bbcb0c3d18b4260fcdf08b200c74a6aee8"}, + {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c0ef38c7a7014ffac184db9e04debe495d317cc9c6fb10071f7fefd93100a4f"}, + {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91d7cc2a76b5567591d12c01f019dd7afce6ba8cba6571187e21e2fc418ae648"}, + {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a93dde851926f4f2678e704fadeb39e16c35d8baebd5252c9fd94ce8ce68c4a0"}, + {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0db75f47be8b8abc8d9e31bc7aad0547ca26f24a54e6fd10231d623f183d089"}, + {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6967ced6730aed543b8673008b5a391c3b1076d834ca438bbd70635c73775368"}, + {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7eedaa5d036d9336c95915035fb57422054014ebdeb6f3b42eac809928e40d0c"}, + {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d487f5432bf35b60ed625d7e1b448e2dc855422e87469e3f450aa5552b0eb284"}, + {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:832436e59afb93e1836081a20f324cb185836c617659b07b129141a8426973c7"}, + {file = "Brotli-1.1.0-cp313-cp313-win32.whl", hash = "sha256:43395e90523f9c23a3d5bdf004733246fba087f2948f87ab28015f12359ca6a0"}, + {file = "Brotli-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:9011560a466d2eb3f5a6e4929cf4a09be405c64154e12df0dd72713f6500e32b"}, + {file = "Brotli-1.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a090ca607cbb6a34b0391776f0cb48062081f5f60ddcce5d11838e67a01928d1"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de9d02f5bda03d27ede52e8cfe7b865b066fa49258cbab568720aa5be80a47d"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2333e30a5e00fe0fe55903c8832e08ee9c3b1382aacf4db26664a16528d51b4b"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4d4a848d1837973bf0f4b5e54e3bec977d99be36a7895c61abb659301b02c112"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fdc3ff3bfccdc6b9cc7c342c03aa2400683f0cb891d46e94b64a197910dc4064"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:5eeb539606f18a0b232d4ba45adccde4125592f3f636a6182b4a8a436548b914"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:fd5f17ff8f14003595ab414e45fce13d073e0762394f957182e69035c9f3d7c2"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = 
"sha256:069a121ac97412d1fe506da790b3e69f52254b9df4eb665cd42460c837193354"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e93dfc1a1165e385cc8239fab7c036fb2cd8093728cbd85097b284d7b99249a2"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:aea440a510e14e818e67bfc4027880e2fb500c2ccb20ab21c7a7c8b5b4703d75"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:6974f52a02321b36847cd19d1b8e381bf39939c21efd6ee2fc13a28b0d99348c"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:a7e53012d2853a07a4a79c00643832161a910674a893d296c9f1259859a289d2"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:d7702622a8b40c49bffb46e1e3ba2e81268d5c04a34f460978c6b5517a34dd52"}, + {file = "Brotli-1.1.0-cp36-cp36m-win32.whl", hash = "sha256:a599669fd7c47233438a56936988a2478685e74854088ef5293802123b5b2460"}, + {file = "Brotli-1.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:d143fd47fad1db3d7c27a1b1d66162e855b5d50a89666af46e1679c496e8e579"}, + {file = "Brotli-1.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:11d00ed0a83fa22d29bc6b64ef636c4552ebafcef57154b4ddd132f5638fbd1c"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f733d788519c7e3e71f0855c96618720f5d3d60c3cb829d8bbb722dddce37985"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:929811df5462e182b13920da56c6e0284af407d1de637d8e536c5cd00a7daf60"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b63b949ff929fbc2d6d3ce0e924c9b93c9785d877a21a1b678877ffbbc4423a"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d192f0f30804e55db0d0e0a35d83a9fead0e9a359a9ed0285dbacea60cc10a84"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f296c40e23065d0d6650c4aefe7470d2a25fffda489bcc3eb66083f3ac9f6643"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:919e32f147ae93a09fe064d77d5ebf4e35502a8df75c29fb05788528e330fe74"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:23032ae55523cc7bccb4f6a0bf368cd25ad9bcdcc1990b64a647e7bbcce9cb5b"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:224e57f6eac61cc449f498cc5f0e1725ba2071a3d4f48d5d9dffba42db196438"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:cb1dac1770878ade83f2ccdf7d25e494f05c9165f5246b46a621cc849341dc01"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:3ee8a80d67a4334482d9712b8e83ca6b1d9bc7e351931252ebef5d8f7335a547"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:5e55da2c8724191e5b557f8e18943b1b4839b8efc3ef60d65985bcf6f587dd38"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:d342778ef319e1026af243ed0a07c97acf3bad33b9f29e7ae6a1f68fd083e90c"}, + {file = "Brotli-1.1.0-cp37-cp37m-win32.whl", hash = "sha256:587ca6d3cef6e4e868102672d3bd9dc9698c309ba56d41c2b9c85bbb903cdb95"}, + {file = "Brotli-1.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2954c1c23f81c2eaf0b0717d9380bd348578a94161a65b3a2afc62c86467dd68"}, + {file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:efa8b278894b14d6da122a72fefcebc28445f2d3f880ac59d46c90f4c13be9a3"}, + {file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:03d20af184290887bdea3f0f78c4f737d126c74dc2f3ccadf07e54ceca3bf208"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6172447e1b368dcbc458925e5ddaf9113477b0ed542df258d84fa28fc45ceea7"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a743e5a28af5f70f9c080380a5f908d4d21d40e8f0e0c8901604d15cfa9ba751"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0541e747cce78e24ea12d69176f6a7ddb690e62c425e01d31cc065e69ce55b48"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cdbc1fc1bc0bff1cef838eafe581b55bfbffaed4ed0318b724d0b71d4d377619"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:890b5a14ce214389b2cc36ce82f3093f96f4cc730c1cffdbefff77a7c71f2a97"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ab4fbee0b2d9098c74f3057b2bc055a8bd92ccf02f65944a241b4349229185a"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:141bd4d93984070e097521ed07e2575b46f817d08f9fa42b16b9b5f27b5ac088"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fce1473f3ccc4187f75b4690cfc922628aed4d3dd013d047f95a9b3919a86596"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d2b35ca2c7f81d173d2fadc2f4f31e88cc5f7a39ae5b6db5513cf3383b0e0ec7"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:af6fa6817889314555aede9a919612b23739395ce767fe7fcbea9a80bf140fe5"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2feb1d960f760a575dbc5ab3b1c00504b24caaf6986e2dc2b01c09c87866a943"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4410f84b33374409552ac9b6903507cdb31cd30d2501fc5ca13d18f73548444a"}, + {file = "Brotli-1.1.0-cp38-cp38-win32.whl", hash = "sha256:db85ecf4e609a48f4b29055f1e144231b90edc90af7481aa731ba2d059226b1b"}, + {file = "Brotli-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3d7954194c36e304e1523f55d7042c59dc53ec20dd4e9ea9d151f1b62b4415c0"}, + {file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb2ce4b8045c78ebbc7b8f3c15062e435d47e7393cc57c25115cfd49883747a"}, + {file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7905193081db9bfa73b1219140b3d315831cbff0d8941f22da695832f0dd188f"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a77def80806c421b4b0af06f45d65a136e7ac0bdca3c09d9e2ea4e515367c7e9"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dadd1314583ec0bf2d1379f7008ad627cd6336625d6679cf2f8e67081b83acf"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:901032ff242d479a0efa956d853d16875d42157f98951c0230f69e69f9c09bac"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:22fc2a8549ffe699bfba2256ab2ed0421a7b8fadff114a3d201794e45a9ff578"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ae15b066e5ad21366600ebec29a7ccbc86812ed267e4b28e860b8ca16a2bc474"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:949f3b7c29912693cee0afcf09acd6ebc04c57af949d9bf77d6101ebb61e388c"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:89f4988c7203739d48c6f806f1e87a1d96e0806d44f0fba61dba81392c9e474d"}, + {file = 
"Brotli-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:de6551e370ef19f8de1807d0a9aa2cdfdce2e85ce88b122fe9f6b2b076837e59"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0737ddb3068957cf1b054899b0883830bb1fec522ec76b1098f9b6e0f02d9419"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4f3607b129417e111e30637af1b56f24f7a49e64763253bbc275c75fa887d4b2"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:6c6e0c425f22c1c719c42670d561ad682f7bfeeef918edea971a79ac5252437f"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:494994f807ba0b92092a163a0a283961369a65f6cbe01e8891132b7a320e61eb"}, + {file = "Brotli-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f0d8a7a6b5983c2496e364b969f0e526647a06b075d034f3297dc66f3b360c64"}, + {file = "Brotli-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cdad5b9014d83ca68c25d2e9444e28e967ef16e80f6b436918c700c117a85467"}, + {file = "Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724"}, +] + +[[package]] +name = "brotlicffi" +version = "1.1.0.0" +description = "Python CFFI bindings to the Brotli library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "platform_python_implementation != \"CPython\"" +files = [ + {file = "brotlicffi-1.1.0.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9b7ae6bd1a3f0df532b6d67ff674099a96d22bc0948955cb338488c31bfb8851"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19ffc919fa4fc6ace69286e0a23b3789b4219058313cf9b45625016bf7ff996b"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9feb210d932ffe7798ee62e6145d3a757eb6233aa9a4e7db78dd3690d7755814"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84763dbdef5dd5c24b75597a77e1b30c66604725707565188ba54bab4f114820"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-win32.whl", hash = "sha256:1b12b50e07c3911e1efa3a8971543e7648100713d4e0971b13631cce22c587eb"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:994a4f0681bb6c6c3b0925530a1926b7a189d878e6e5e38fae8efa47c5d9c613"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2e4aeb0bd2540cb91b069dbdd54d458da8c4334ceaf2d25df2f4af576d6766ca"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b7b0033b0d37bb33009fb2fef73310e432e76f688af76c156b3594389d81391"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54a07bb2374a1eba8ebb52b6fafffa2afd3c4df85ddd38fcc0511f2bb387c2a8"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7901a7dc4b88f1c1475de59ae9be59799db1007b7d059817948d8e4f12e24e35"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce01c7316aebc7fce59da734286148b1d1b9455f89cf2c8a4dfce7d41db55c2d"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:246f1d1a90279bb6069de3de8d75a8856e073b8ff0b09dcca18ccc14cec85979"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc4bc5d82bc56ebd8b514fb8350cfac4627d6b0743382e46d033976a5f80fab6"}, + {file = 
"brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37c26ecb14386a44b118ce36e546ce307f4810bc9598a6e6cb4f7fca725ae7e6"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca72968ae4eaf6470498d5c2887073f7efe3b1e7d7ec8be11a06a79cc810e990"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:add0de5b9ad9e9aa293c3aa4e9deb2b61e99ad6c1634e01d01d98c03e6a354cc"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9b6068e0f3769992d6b622a1cd2e7835eae3cf8d9da123d7f51ca9c1e9c333e5"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8557a8559509b61e65083f8782329188a250102372576093c88930c875a69838"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a7ae37e5d79c5bdfb5b4b99f2715a6035e6c5bf538c3746abc8e26694f92f33"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391151ec86bb1c683835980f4816272a87eaddc46bb91cbf44f62228b84d8cca"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2f3711be9290f0453de8eed5275d93d286abe26b08ab4a35d7452caa1fef532f"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a807d760763e398bbf2c6394ae9da5815901aa93ee0a37bca5efe78d4ee3171"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa8ca0623b26c94fccc3a1fdd895be1743b838f3917300506d04aa3346fd2a14"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3de0cf28a53a3238b252aca9fed1593e9d36c1d116748013339f0949bfc84112"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6be5ec0e88a4925c91f3dea2bb0013b3a2accda6f77238f76a34a1ea532a1cb0"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d9eb71bb1085d996244439154387266fd23d6ad37161f6f52f1cd41dd95a3808"}, + {file = "brotlicffi-1.1.0.0.tar.gz", hash = "sha256:b77827a689905143f87915310b93b273ab17888fd43ef350d4832c4a71083c13"}, +] + +[package.dependencies] +cffi = ">=1.0.0" + +[[package]] +name = "cachetools" +version = "5.5.1" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "cachetools-5.5.1-py3-none-any.whl", hash = "sha256:b76651fdc3b24ead3c648bbdeeb940c1b04d365b38b4af66788f9ec4a81d42bb"}, + {file = "cachetools-5.5.1.tar.gz", hash = "sha256:70f238fbba50383ef62e55c6aff6d9673175fe59f7c6782c7a0b9e38f4a9df95"}, +] + +[[package]] +name = "certifi" +version = "2025.1.31" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, + {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, +] + +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_python_implementation != \"CPython\"" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = 
"sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "chardet" +version = "5.2.0" +description = "Universal encoding 
detector for Python 3" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, + {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, + {file = 
"charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash 
= "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, + {file = 
"charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, + {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, + {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, +] + +[[package]] +name = "chevron" +version = "0.14.0" +description = "Mustache templating language renderer" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "chevron-0.14.0-py3-none-any.whl", hash = "sha256:fbf996a709f8da2e745ef763f482ce2d311aa817d287593a5b990d6d6e4f0443"}, + {file = "chevron-0.14.0.tar.gz", hash = "sha256:87613aafdf6d77b6a90ff073165a61ae5086e21ad49057aa0e53681601800ebf"}, +] + +[[package]] +name = "click" +version = "8.1.8" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] +markers = {main = "sys_platform == \"win32\" or platform_system == \"Windows\""} + +[[package]] +name = "commitizen" +version = "4.2.2" +description = "Python commitizen client tool" +optional = false +python-versions = "<4.0,>=3.9" +groups = ["dev"] +files = [ + {file = "commitizen-4.2.2-py3-none-any.whl", hash = "sha256:5b42228178ee999dbdd95c2bf0ea73f8f539e8ed4cad421c2fe0b55b16458d2f"}, + {file = "commitizen-4.2.2.tar.gz", hash = "sha256:eadf31514d6ce6a12537ccba095d3107f659ff99ae6159212d9de2a9d896dd76"}, +] + +[package.dependencies] +argcomplete = ">=1.12.1,<3.6" +charset-normalizer = ">=2.1.0,<4" +colorama = ">=0.4.1,<1.0" +decli = ">=0.6.0,<1.0" +jinja2 = ">=2.10.3" +packaging = ">=19" +pyyaml = ">=3.08" +questionary = ">=2.0,<3.0" +termcolor = ">=1.1,<3" +tomlkit = ">=0.5.3,<1.0.0" + +[[package]] +name = "coverage" +version = "7.6.12" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "coverage-7.6.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:704c8c8c6ce6569286ae9622e534b4f5b9759b6f2cd643f1c1a61f666d534fe8"}, + {file = "coverage-7.6.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ad7525bf0241e5502168ae9c643a2f6c219fa0a283001cee4cf23a9b7da75879"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06097c7abfa611c91edb9e6920264e5be1d6ceb374efb4986f38b09eed4cb2fe"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220fa6c0ad7d9caef57f2c8771918324563ef0d8272c94974717c3909664e674"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3688b99604a24492bcfe1c106278c45586eb819bf66a654d8a9a1433022fb2eb"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d1a987778b9c71da2fc8948e6f2656da6ef68f59298b7e9786849634c35d2c3c"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cec6b9ce3bd2b7853d4a4563801292bfee40b030c05a3d29555fd2a8ee9bd68c"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ace9048de91293e467b44bce0f0381345078389814ff6e18dbac8fdbf896360e"}, + {file = "coverage-7.6.12-cp310-cp310-win32.whl", hash = "sha256:ea31689f05043d520113e0552f039603c4dd71fa4c287b64cb3606140c66f425"}, + {file = "coverage-7.6.12-cp310-cp310-win_amd64.whl", hash = "sha256:676f92141e3c5492d2a1596d52287d0d963df21bf5e55c8b03075a60e1ddf8aa"}, + {file = "coverage-7.6.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e18aafdfb3e9ec0d261c942d35bd7c28d031c5855dadb491d2723ba54f4c3015"}, + {file = "coverage-7.6.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66fe626fd7aa5982cdebad23e49e78ef7dbb3e3c2a5960a2b53632f1f703ea45"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ef01d70198431719af0b1f5dcbefc557d44a190e749004042927b2a3fed0702"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:07e92ae5a289a4bc4c0aae710c0948d3c7892e20fd3588224ebe242039573bf0"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e695df2c58ce526eeab11a2e915448d3eb76f75dffe338ea613c1201b33bab2f"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d74c08e9aaef995f8c4ef6d202dbd219c318450fe2a76da624f2ebb9c8ec5d9f"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e995b3b76ccedc27fe4f477b349b7d64597e53a43fc2961db9d3fbace085d69d"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b1f097878d74fe51e1ddd1be62d8e3682748875b461232cf4b52ddc6e6db0bba"}, + {file = "coverage-7.6.12-cp311-cp311-win32.whl", hash = "sha256:1f7ffa05da41754e20512202c866d0ebfc440bba3b0ed15133070e20bf5aeb5f"}, + {file = "coverage-7.6.12-cp311-cp311-win_amd64.whl", hash = "sha256:e216c5c45f89ef8971373fd1c5d8d1164b81f7f5f06bbf23c37e7908d19e8558"}, + {file = "coverage-7.6.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b172f8e030e8ef247b3104902cc671e20df80163b60a203653150d2fc204d1ad"}, + {file = "coverage-7.6.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:641dfe0ab73deb7069fb972d4d9725bf11c239c309ce694dd50b1473c0f641c3"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e549f54ac5f301e8e04c569dfdb907f7be71b06b88b5063ce9d6953d2d58574"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:959244a17184515f8c52dcb65fb662808767c0bd233c1d8a166e7cf74c9ea985"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bda1c5f347550c359f841d6614fb8ca42ae5cb0b74d39f8a1e204815ebe25750"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ceeb90c3eda1f2d8c4c578c14167dbd8c674ecd7d38e45647543f19839dd6ea"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f16f44025c06792e0fb09571ae454bcc7a3ec75eeb3c36b025eccf501b1a4c3"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b076e625396e787448d27a411aefff867db2bffac8ed04e8f7056b07024eed5a"}, + {file = "coverage-7.6.12-cp312-cp312-win32.whl", hash = "sha256:00b2086892cf06c7c2d74983c9595dc511acca00665480b3ddff749ec4fb2a95"}, + {file = "coverage-7.6.12-cp312-cp312-win_amd64.whl", hash = "sha256:7ae6eabf519bc7871ce117fb18bf14e0e343eeb96c377667e3e5dd12095e0288"}, + {file = "coverage-7.6.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:488c27b3db0ebee97a830e6b5a3ea930c4a6e2c07f27a5e67e1b3532e76b9ef1"}, + {file = "coverage-7.6.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d1095bbee1851269f79fd8e0c9b5544e4c00c0c24965e66d8cba2eb5bb535fd"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0533adc29adf6a69c1baa88c3d7dbcaadcffa21afbed3ca7a225a440e4744bf9"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53c56358d470fa507a2b6e67a68fd002364d23c83741dbc4c2e0680d80ca227e"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64cbb1a3027c79ca6310bf101014614f6e6e18c226474606cf725238cf5bc2d4"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_aarch64.whl", 
hash = "sha256:79cac3390bfa9836bb795be377395f28410811c9066bc4eefd8015258a7578c6"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b148068e881faa26d878ff63e79650e208e95cf1c22bd3f77c3ca7b1d9821a3"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8bec2ac5da793c2685ce5319ca9bcf4eee683b8a1679051f8e6ec04c4f2fd7dc"}, + {file = "coverage-7.6.12-cp313-cp313-win32.whl", hash = "sha256:200e10beb6ddd7c3ded322a4186313d5ca9e63e33d8fab4faa67ef46d3460af3"}, + {file = "coverage-7.6.12-cp313-cp313-win_amd64.whl", hash = "sha256:2b996819ced9f7dbb812c701485d58f261bef08f9b85304d41219b1496b591ef"}, + {file = "coverage-7.6.12-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:299cf973a7abff87a30609879c10df0b3bfc33d021e1adabc29138a48888841e"}, + {file = "coverage-7.6.12-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4b467a8c56974bf06e543e69ad803c6865249d7a5ccf6980457ed2bc50312703"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2458f275944db8129f95d91aee32c828a408481ecde3b30af31d552c2ce284a0"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a9d8be07fb0832636a0f72b80d2a652fe665e80e720301fb22b191c3434d924"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d47376a4f445e9743f6c83291e60adb1b127607a3618e3185bbc8091f0467b"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b95574d06aa9d2bd6e5cc35a5bbe35696342c96760b69dc4287dbd5abd4ad51d"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:ecea0c38c9079570163d663c0433a9af4094a60aafdca491c6a3d248c7432827"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2251fabcfee0a55a8578a9d29cecfee5f2de02f11530e7d5c5a05859aa85aee9"}, + {file = "coverage-7.6.12-cp313-cp313t-win32.whl", hash = "sha256:eb5507795caabd9b2ae3f1adc95f67b1104971c22c624bb354232d65c4fc90b3"}, + {file = "coverage-7.6.12-cp313-cp313t-win_amd64.whl", hash = "sha256:f60a297c3987c6c02ffb29effc70eadcbb412fe76947d394a1091a3615948e2f"}, + {file = "coverage-7.6.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e7575ab65ca8399c8c4f9a7d61bbd2d204c8b8e447aab9d355682205c9dd948d"}, + {file = "coverage-7.6.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8161d9fbc7e9fe2326de89cd0abb9f3599bccc1287db0aba285cb68d204ce929"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a1e465f398c713f1b212400b4e79a09829cd42aebd360362cd89c5bdc44eb87"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f25d8b92a4e31ff1bd873654ec367ae811b3a943583e05432ea29264782dc32c"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a936309a65cc5ca80fa9f20a442ff9e2d06927ec9a4f54bcba9c14c066323f2"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aa6f302a3a0b5f240ee201297fff0bbfe2fa0d415a94aeb257d8b461032389bd"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f973643ef532d4f9be71dd88cf7588936685fdb576d93a79fe9f65bc337d9d73"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:78f5243bb6b1060aed6213d5107744c19f9571ec76d54c99cc15938eb69e0e86"}, + {file = "coverage-7.6.12-cp39-cp39-win32.whl", hash = "sha256:69e62c5034291c845fc4df7f8155e8544178b6c774f97a99e2734b05eb5bed31"}, + {file = "coverage-7.6.12-cp39-cp39-win_amd64.whl", hash = "sha256:b01a840ecc25dce235ae4c1b6a0daefb2a203dba0e6e980637ee9c2f6ee0df57"}, + {file = "coverage-7.6.12-pp39.pp310-none-any.whl", hash = "sha256:7e39e845c4d764208e7b8f6a21c541ade741e2c41afabdfa1caa28687a3c98cf"}, + {file = "coverage-7.6.12-py3-none-any.whl", hash = "sha256:eb8668cfbc279a536c633137deeb9435d2962caec279c3f8cf8b91fff6ff8953"}, + {file = "coverage-7.6.12.tar.gz", hash = "sha256:48cfc4641d95d34766ad41d9573cc0f22a48aa88d22657a1fe01dca0dbae4de2"}, +] + +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "decli" +version = "0.6.2" +description = "Minimal, easy-to-use, declarative cli tool" +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "decli-0.6.2-py3-none-any.whl", hash = "sha256:2fc84106ce9a8f523ed501ca543bdb7e416c064917c12a59ebdc7f311a97b7ed"}, + {file = "decli-0.6.2.tar.gz", hash = "sha256:36f71eb55fd0093895efb4f416ec32b7f6e00147dda448e3365cf73ceab42d6f"}, +] + +[[package]] +name = "dill" +version = "0.3.9" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, + {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "distlib" +version = "0.3.9" +description = "Distribution utilities" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, +] + +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + +[[package]] +name = "duckduckgo-search" +version = "7.4.3" +description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "duckduckgo_search-7.4.3-py3-none-any.whl", hash = "sha256:8caf80b3e3b4dab59402cdc81e6b2dfee5c8887f5a1c8ff9a6f38c52f6bccfdf"}, + {file = "duckduckgo_search-7.4.3.tar.gz", hash = "sha256:dba1c7578991bbcaca6183adb2eab388b522ba8f24527789bb6a190a596b47ec"}, +] + +[package.dependencies] +click = ">=8.1.8" +httpx = {version = ">=0.28.1", extras = ["brotli", "http2", "socks"]} +lxml = ">=5.3.0" + +[package.extras] +dev = ["mypy (>=1.14.1)", "pytest (>=8.3.4)", "pytest-dependency (>=0.6.0)", "ruff (>=0.9.2)"] + +[[package]] +name = "fastjsonschema" +version = "2.21.1" +description = "Fastest Python implementation of JSON schema" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667"}, + {file = "fastjsonschema-2.21.1.tar.gz", hash = "sha256:794d4f0a58f848961ba16af7b9c85a3e88cd360df008c59aac6fc5ae9323b5d4"}, +] + +[package.extras] +devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] + +[[package]] +name = "filelock" +version = "3.17.0" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338"}, + {file = "filelock-3.17.0.tar.gz", hash = "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] + +[[package]] +name = "frozenlist" +version = "1.5.0" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5b6a66c18b5b9dd261ca98dffcb826a525334b2f29e7caa54e182255c5f6a65a"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d1b3eb7b05ea246510b43a7e53ed1653e55c2121019a97e60cad7efb881a97bb"}, + {file = "frozenlist-1.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:15538c0cbf0e4fa11d1e3a71f823524b0c46299aed6e10ebb4c2089abd8c3bec"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e79225373c317ff1e35f210dd5f1344ff31066ba8067c307ab60254cd3a78ad5"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9272fa73ca71266702c4c3e2d4a28553ea03418e591e377a03b8e3659d94fa76"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:498524025a5b8ba81695761d78c8dd7382ac0b052f34e66939c42df860b8ff17"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92b5278ed9d50fe610185ecd23c55d8b307d75ca18e94c0e7de328089ac5dcba"}, + {file = "frozenlist-1.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7f3c8c1dacd037df16e85227bac13cca58c30da836c6f936ba1df0c05d046d8d"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f2ac49a9bedb996086057b75bf93538240538c6d9b38e57c82d51f75a73409d2"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e66cc454f97053b79c2ab09c17fbe3c825ea6b4de20baf1be28919460dd7877f"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5a3ba5f9a0dfed20337d3e966dc359784c9f96503674c2faf015f7fe8e96798c"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6321899477db90bdeb9299ac3627a6a53c7399c8cd58d25da094007402b039ab"}, + {file = "frozenlist-1.5.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76e4753701248476e6286f2ef492af900ea67d9706a0155335a40ea21bf3b2f5"}, + {file = "frozenlist-1.5.0-cp310-cp310-win32.whl", hash = "sha256:977701c081c0241d0955c9586ffdd9ce44f7a7795df39b9151cd9a6fd0ce4cfb"}, + {file = "frozenlist-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:189f03b53e64144f90990d29a27ec4f7997d91ed3d01b51fa39d2dbe77540fd4"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf"}, + {file = "frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942"}, + {file = "frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d"}, + {file = 
"frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03"}, + {file = "frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c"}, + {file = "frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dd94994fc91a6177bfaafd7d9fd951bc8689b0a98168aa26b5f543868548d3ca"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0da8bbec082bf6bf18345b180958775363588678f64998c2b7609e34719b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73f2e31ea8dd7df61a359b731716018c2be196e5bb3b74ddba107f694fbd7604"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:828afae9f17e6de596825cf4228ff28fbdf6065974e5ac1410cecc22f699d2b3"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1577515d35ed5649d52ab4319db757bb881ce3b2b796d7283e6634d99ace307"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2150cc6305a2c2ab33299453e2968611dacb970d2283a14955923062c8d00b10"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a72b7a6e3cd2725eff67cd64c8f13335ee18fc3c7befc05aed043d24c7b9ccb9"}, + {file = "frozenlist-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c16d2fa63e0800723139137d667e1056bee1a1cf7965153d2d104b62855e9b99"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:17dcc32fc7bda7ce5875435003220a457bcfa34ab7924a49a1c19f55b6ee185c"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:97160e245ea33d8609cd2b8fd997c850b56db147a304a262abc2b3be021a9171"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f1e6540b7fa044eee0bb5111ada694cf3dc15f2b0347ca125ee9ca984d5e9e6e"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:91d6c171862df0a6c61479d9724f22efb6109111017c87567cfeb7b5d1449fdf"}, + {file = "frozenlist-1.5.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c1fac3e2ace2eb1052e9f7c7db480818371134410e1f5c55d65e8f3ac6d1407e"}, + {file = "frozenlist-1.5.0-cp38-cp38-win32.whl", hash = "sha256:b97f7b575ab4a8af9b7bc1d2ef7f29d3afee2226bd03ca3875c16451ad5a7723"}, + {file = 
"frozenlist-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:374ca2dabdccad8e2a76d40b1d037f5bd16824933bf7bcea3e59c891fd4a0923"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9bbcdfaf4af7ce002694a4e10a0159d5a8d20056a12b05b45cea944a4953f972"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1893f948bf6681733aaccf36c5232c231e3b5166d607c5fa77773611df6dc336"}, + {file = "frozenlist-1.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2b5e23253bb709ef57a8e95e6ae48daa9ac5f265637529e4ce6b003a37b2621f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f253985bb515ecd89629db13cb58d702035ecd8cfbca7d7a7e29a0e6d39af5f"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04a5c6babd5e8fb7d3c871dc8b321166b80e41b637c31a995ed844a6139942b6"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fe0f1c29ba24ba6ff6abf688cb0b7cf1efab6b6aa6adc55441773c252f7411"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d72559fa19babe2ccd920273e767c96a49b9d3d38badd7c91a0fdeda8ea08"}, + {file = "frozenlist-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b731db116ab3aedec558573c1a5eec78822b32292fe4f2f0345b7f697745c2"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:366d8f93e3edfe5a918c874702f78faac300209a4d5bf38352b2c1bdc07a766d"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1b96af8c582b94d381a1c1f51ffaedeb77c821c690ea5f01da3d70a487dd0a9b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c03eff4a41bd4e38415cbed054bbaff4a075b093e2394b6915dca34a40d1e38b"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:50cf5e7ee9b98f22bdecbabf3800ae78ddcc26e4a435515fc72d97903e8488e0"}, + {file = "frozenlist-1.5.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e76bfbc72353269c44e0bc2cfe171900fbf7f722ad74c9a7b638052afe6a00c"}, + {file = "frozenlist-1.5.0-cp39-cp39-win32.whl", hash = "sha256:666534d15ba8f0fda3f53969117383d5dc021266b3c1a42c9ec4855e4b58b9d3"}, + {file = "frozenlist-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:5c28f4b5dbef8a0d8aad0d4de24d1e9e981728628afaf4ea0792f5d0939372f0"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, +] + +[[package]] +name = "fsspec" +version = "2025.2.0" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "fsspec-2025.2.0-py3-none-any.whl", hash = "sha256:9de2ad9ce1f85e1931858535bc882543171d197001a0a5eb2ddc04f1781ab95b"}, + {file = "fsspec-2025.2.0.tar.gz", hash = "sha256:1c24b16eaa0a1798afa0337aa0db9b256718ab2a89c425371f5628d22c3b6afd"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +dev = ["pre-commit", "ruff"] +doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", 
"fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"] +test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"] +test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"] +tqdm = ["tqdm"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "h2" +version = "4.2.0" +description = "Pure-Python HTTP/2 protocol implementation" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "h2-4.2.0-py3-none-any.whl", hash = "sha256:479a53ad425bb29af087f3458a61d30780bc818e4ebcf01f0b536ba916462ed0"}, + {file = "h2-4.2.0.tar.gz", hash = "sha256:c8a52129695e88b1a0578d8d2cc6842bbd79128ac685463b887ee278126ad01f"}, +] + +[package.dependencies] +hpack = ">=4.1,<5" +hyperframe = ">=6.1,<7" + +[[package]] +name = "hpack" +version = "4.1.0" +description = "Pure-Python HPACK header encoding" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496"}, + {file = "hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca"}, +] + +[[package]] +name = "httpcore" +version = "1.0.7" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httpx" +version = "0.28.1" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, +] + +[package.dependencies] +anyio = "*" +brotli = {version = "*", optional = true, markers = "platform_python_implementation == \"CPython\" and extra == \"brotli\""} +brotlicffi = {version = "*", optional = true, markers = "platform_python_implementation != \"CPython\" and extra == \"brotli\""} +certifi = "*" +h2 = {version = ">=3,<5", optional = true, markers = "extra == \"http2\""} +httpcore = "==1.*" +idna = "*" +socksio = {version = "==1.*", optional = true, markers = "extra == \"socks\""} + +[package.extras] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "httpx-sse" +version = "0.4.0" +description = "Consume Server-Sent Event (SSE) messages with HTTPX." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, + {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, +] + +[[package]] +name = "huggingface-hub" +version = "0.29.0" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = false +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "huggingface_hub-0.29.0-py3-none-any.whl", hash = "sha256:c02daa0b6bafbdacb1320fdfd1dc7151d0940825c88c4ef89837fdb1f6ea0afe"}, + {file = "huggingface_hub-0.29.0.tar.gz", hash = "sha256:64034c852be270cac16c5743fe1f659b14515a9de6342d6f42cbb2ede191fc80"}, +] + +[package.dependencies] +filelock = "*" +fsspec = ">=2023.5.0" +packaging = ">=20.9" +pyyaml = ">=5.1" +requests = "*" +tqdm = ">=4.42.1" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +cli = ["InquirerPy (==0.3.4)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +hf-transfer = ["hf-transfer (>=0.1.4)"] +inference = ["aiohttp"] +quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.9.0)"] +tensorflow = ["graphviz", "pydot", "tensorflow"] +tensorflow-testing = ["keras (<3.0)", "tensorflow"] 
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
+torch = ["safetensors[torch]", "torch"]
+typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"]
+
+[[package]]
+name = "hyperframe"
+version = "6.1.0"
+description = "Pure-Python HTTP/2 framing"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5"},
+    {file = "hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08"},
+]
+
+[[package]]
+name = "idna"
+version = "3.10"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
+    {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
+]
+
+[package.extras]
+all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
+
+[[package]]
+name = "importlib-metadata"
+version = "8.6.1"
+description = "Read metadata from Python packages"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"},
+    {file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"},
+]
+
+[package.dependencies]
+zipp = ">=3.20"
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+enabler = ["pytest-enabler (>=2.2)"]
+perf = ["ipython"]
+test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
+type = ["pytest-mypy"]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+groups = ["dev"]
+files = [
+    {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+    {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "isort"
+version = "6.0.0"
+description = "A Python utility / library to sort Python imports."
+optional = false
+python-versions = ">=3.9.0"
+groups = ["main"]
+files = [
+    {file = "isort-6.0.0-py3-none-any.whl", hash = "sha256:567954102bb47bb12e0fae62606570faacddd441e45683968c8d1734fb1af892"},
+    {file = "isort-6.0.0.tar.gz", hash = "sha256:75d9d8a1438a9432a7d7b54f2d3b45cad9a4a0fdba43617d9873379704a8bdf1"},
+]
+
+[package.extras]
+colors = ["colorama"]
+plugins = ["setuptools"]
+
+[[package]]
+name = "jinja2"
+version = "3.1.5"
+description = "A very fast and expressive template engine."
+optional = false
+python-versions = ">=3.7"
+groups = ["main", "dev"]
+files = [
+    {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"},
+    {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.0"
+
+[package.extras]
+i18n = ["Babel (>=2.7)"]
+
+[[package]]
+name = "jiter"
+version = "0.8.2"
+description = "Fast iterable JSON parser."
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "jiter-0.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:ca8577f6a413abe29b079bc30f907894d7eb07a865c4df69475e868d73e71c7b"},
+    {file = "jiter-0.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b25bd626bde7fb51534190c7e3cb97cee89ee76b76d7585580e22f34f5e3f393"},
+    {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5c826a221851a8dc028eb6d7d6429ba03184fa3c7e83ae01cd6d3bd1d4bd17d"},
+    {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d35c864c2dff13dfd79fb070fc4fc6235d7b9b359efe340e1261deb21b9fcb66"},
+    {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f557c55bc2b7676e74d39d19bcb8775ca295c7a028246175d6a8b431e70835e5"},
+    {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:580ccf358539153db147e40751a0b41688a5ceb275e6f3e93d91c9467f42b2e3"},
+    {file = "jiter-0.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af102d3372e917cffce49b521e4c32c497515119dc7bd8a75665e90a718bbf08"},
+    {file = "jiter-0.8.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cadcc978f82397d515bb2683fc0d50103acff2a180552654bb92d6045dec2c49"},
+    {file = "jiter-0.8.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ba5bdf56969cad2019d4e8ffd3f879b5fdc792624129741d3d83fc832fef8c7d"},
+    {file = "jiter-0.8.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3b94a33a241bee9e34b8481cdcaa3d5c2116f575e0226e421bed3f7a6ea71cff"},
+    {file = "jiter-0.8.2-cp310-cp310-win32.whl", hash = "sha256:6e5337bf454abddd91bd048ce0dca5134056fc99ca0205258766db35d0a2ea43"},
+    {file = "jiter-0.8.2-cp310-cp310-win_amd64.whl", hash = "sha256:4a9220497ca0cb1fe94e3f334f65b9b5102a0b8147646118f020d8ce1de70105"},
+    {file = "jiter-0.8.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:2dd61c5afc88a4fda7d8b2cf03ae5947c6ac7516d32b7a15bf4b49569a5c076b"},
+    {file = "jiter-0.8.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a6c710d657c8d1d2adbbb5c0b0c6bfcec28fd35bd6b5f016395f9ac43e878a15"},
+    {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9584de0cd306072635fe4b89742bf26feae858a0683b399ad0c2509011b9dc0"},
+    {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5a90a923338531b7970abb063cfc087eebae6ef8ec8139762007188f6bc69a9f"},
+    {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21974d246ed0181558087cd9f76e84e8321091ebfb3a93d4c341479a736f099"},
+    {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32475a42b2ea7b344069dc1e81445cfc00b9d0e3ca837f0523072432332e9f74"},
+    {file = "jiter-0.8.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b9931fd36ee513c26b5bf08c940b0ac875de175341cbdd4fa3be109f0492586"},
+    {file = "jiter-0.8.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0820f4a3a59ddced7fce696d86a096d5cc48d32a4183483a17671a61edfddc"},
+    {file = "jiter-0.8.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8ffc86ae5e3e6a93765d49d1ab47b6075a9c978a2b3b80f0f32628f39caa0c88"},
+    {file = "jiter-0.8.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5127dc1abd809431172bc3fbe8168d6b90556a30bb10acd5ded41c3cfd6f43b6"},
+    {file = "jiter-0.8.2-cp311-cp311-win32.whl", hash = "sha256:66227a2c7b575720c1871c8800d3a0122bb8ee94edb43a5685aa9aceb2782d44"},
+    {file = "jiter-0.8.2-cp311-cp311-win_amd64.whl", hash = "sha256:cde031d8413842a1e7501e9129b8e676e62a657f8ec8166e18a70d94d4682855"},
+    {file = "jiter-0.8.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e6ec2be506e7d6f9527dae9ff4b7f54e68ea44a0ef6b098256ddf895218a2f8f"},
+    {file = "jiter-0.8.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76e324da7b5da060287c54f2fabd3db5f76468006c811831f051942bf68c9d44"},
+    {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:180a8aea058f7535d1c84183c0362c710f4750bef66630c05f40c93c2b152a0f"},
+    {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025337859077b41548bdcbabe38698bcd93cfe10b06ff66617a48ff92c9aec60"},
+    {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecff0dc14f409599bbcafa7e470c00b80f17abc14d1405d38ab02e4b42e55b57"},
+    {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffd9fee7d0775ebaba131f7ca2e2d83839a62ad65e8e02fe2bd8fc975cedeb9e"},
+    {file = "jiter-0.8.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14601dcac4889e0a1c75ccf6a0e4baf70dbc75041e51bcf8d0e9274519df6887"},
+    {file = "jiter-0.8.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:92249669925bc1c54fcd2ec73f70f2c1d6a817928480ee1c65af5f6b81cdf12d"},
+    {file = "jiter-0.8.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e725edd0929fa79f8349ab4ec7f81c714df51dc4e991539a578e5018fa4a7152"},
+    {file = "jiter-0.8.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bf55846c7b7a680eebaf9c3c48d630e1bf51bdf76c68a5f654b8524335b0ad29"},
+    {file = "jiter-0.8.2-cp312-cp312-win32.whl", hash = "sha256:7efe4853ecd3d6110301665a5178b9856be7e2a9485f49d91aa4d737ad2ae49e"},
+    {file = "jiter-0.8.2-cp312-cp312-win_amd64.whl", hash = "sha256:83c0efd80b29695058d0fd2fa8a556490dbce9804eac3e281f373bbc99045f6c"},
+    {file = "jiter-0.8.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ca1f08b8e43dc3bd0594c992fb1fd2f7ce87f7bf0d44358198d6da8034afdf84"},
+    {file = "jiter-0.8.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5672a86d55416ccd214c778efccf3266b84f87b89063b582167d803246354be4"},
+    {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58dc9bc9767a1101f4e5e22db1b652161a225874d66f0e5cb8e2c7d1c438b587"},
+    {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b2998606d6dadbb5ccda959a33d6a5e853252d921fec1792fc902351bb4e2c"},
+    {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ab9a87f3784eb0e098f84a32670cfe4a79cb6512fd8f42ae3d0709f06405d18"},
+    {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79aec8172b9e3c6d05fd4b219d5de1ac616bd8da934107325a6c0d0e866a21b6"},
+    {file = "jiter-0.8.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:711e408732d4e9a0208008e5892c2966b485c783cd2d9a681f3eb147cf36c7ef"},
+    {file = "jiter-0.8.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:653cf462db4e8c41995e33d865965e79641ef45369d8a11f54cd30888b7e6ff1"},
+    {file = "jiter-0.8.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:9c63eaef32b7bebac8ebebf4dabebdbc6769a09c127294db6babee38e9f405b9"},
+    {file = "jiter-0.8.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:eb21aaa9a200d0a80dacc7a81038d2e476ffe473ffdd9c91eb745d623561de05"},
+    {file = "jiter-0.8.2-cp313-cp313-win32.whl", hash = "sha256:789361ed945d8d42850f919342a8665d2dc79e7e44ca1c97cc786966a21f627a"},
+    {file = "jiter-0.8.2-cp313-cp313-win_amd64.whl", hash = "sha256:ab7f43235d71e03b941c1630f4b6e3055d46b6cb8728a17663eaac9d8e83a865"},
+    {file = "jiter-0.8.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b426f72cd77da3fec300ed3bc990895e2dd6b49e3bfe6c438592a3ba660e41ca"},
+    {file = "jiter-0.8.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2dd880785088ff2ad21ffee205e58a8c1ddabc63612444ae41e5e4b321b39c0"},
+    {file = "jiter-0.8.2-cp313-cp313t-win_amd64.whl", hash = "sha256:3ac9f578c46f22405ff7f8b1f5848fb753cc4b8377fbec8470a7dc3997ca7566"},
+    {file = "jiter-0.8.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9e1fa156ee9454642adb7e7234a383884452532bc9d53d5af2d18d98ada1d79c"},
+    {file = "jiter-0.8.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cf5dfa9956d96ff2efb0f8e9c7d055904012c952539a774305aaaf3abdf3d6c"},
+    {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e52bf98c7e727dd44f7c4acb980cb988448faeafed8433c867888268899b298b"},
+    {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a2ecaa3c23e7a7cf86d00eda3390c232f4d533cd9ddea4b04f5d0644faf642c5"},
+    {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08d4c92bf480e19fc3f2717c9ce2aa31dceaa9163839a311424b6862252c943e"},
+    {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99d9a1eded738299ba8e106c6779ce5c3893cffa0e32e4485d680588adae6db8"},
+    {file = "jiter-0.8.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20be8b7f606df096e08b0b1b4a3c6f0515e8dac296881fe7461dfa0fb5ec817"},
+    {file = "jiter-0.8.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d33f94615fcaf872f7fd8cd98ac3b429e435c77619777e8a449d9d27e01134d1"},
+    {file = "jiter-0.8.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:317b25e98a35ffec5c67efe56a4e9970852632c810d35b34ecdd70cc0e47b3b6"},
+    {file = "jiter-0.8.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fc9043259ee430ecd71d178fccabd8c332a3bf1e81e50cae43cc2b28d19e4cb7"},
+    {file = "jiter-0.8.2-cp38-cp38-win32.whl", hash = "sha256:fc5adda618205bd4678b146612ce44c3cbfdee9697951f2c0ffdef1f26d72b63"},
+    {file = "jiter-0.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:cd646c827b4f85ef4a78e4e58f4f5854fae0caf3db91b59f0d73731448a970c6"},
+    {file = "jiter-0.8.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e41e75344acef3fc59ba4765df29f107f309ca9e8eace5baacabd9217e52a5ee"},
+    {file = "jiter-0.8.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f22b16b35d5c1df9dfd58843ab2cd25e6bf15191f5a236bed177afade507bfc"},
+    {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7200b8f7619d36aa51c803fd52020a2dfbea36ffec1b5e22cab11fd34d95a6d"},
+    {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:70bf4c43652cc294040dbb62256c83c8718370c8b93dd93d934b9a7bf6c4f53c"},
+    {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9d471356dc16f84ed48768b8ee79f29514295c7295cb41e1133ec0b2b8d637d"},
+    {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:859e8eb3507894093d01929e12e267f83b1d5f6221099d3ec976f0c995cb6bd9"},
+    {file = "jiter-0.8.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaa58399c01db555346647a907b4ef6d4f584b123943be6ed5588c3f2359c9f4"},
+    {file = "jiter-0.8.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8f2d5ed877f089862f4c7aacf3a542627c1496f972a34d0474ce85ee7d939c27"},
+    {file = "jiter-0.8.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:03c9df035d4f8d647f8c210ddc2ae0728387275340668fb30d2421e17d9a0841"},
+    {file = "jiter-0.8.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8bd2a824d08d8977bb2794ea2682f898ad3d8837932e3a74937e93d62ecbb637"},
+    {file = "jiter-0.8.2-cp39-cp39-win32.whl", hash = "sha256:ca29b6371ebc40e496995c94b988a101b9fbbed48a51190a4461fcb0a68b4a36"},
+    {file = "jiter-0.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:1c0dfbd1be3cbefc7510102370d86e35d1d53e5a93d48519688b1bf0f761160a"},
+    {file = "jiter-0.8.2.tar.gz", hash = "sha256:cd73d3e740666d0e639f678adb176fad25c1bcbdae88d8d7b857e1783bb4212d"},
+]
+
+[[package]]
+name = "jsonschema"
+version = "4.23.0"
+description = "An implementation of JSON Schema validation for Python"
+optional = false
+python-versions = ">=3.8"
+groups = ["main", "dev"]
+files = [
+    {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
+    {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
+]
+
+[package.dependencies]
+attrs = ">=22.2.0"
+jsonschema-specifications = ">=2023.03.6"
+referencing = ">=0.28.4"
+rpds-py = ">=0.7.1"
+
+[package.extras]
+format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"]
+
+[[package]]
+name = "jsonschema-specifications"
+version = "2024.10.1"
+description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
+optional = false
+python-versions = ">=3.9"
+groups = ["main", "dev"]
+files = [
+    {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"},
+    {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"},
+]
+
+[package.dependencies]
+referencing = ">=0.31.0"
+
+[[package]]
+name = "jupyter-core"
+version = "5.7.2"
+description = "Jupyter core package. A base package on which Jupyter projects rely."
+optional = false
+python-versions = ">=3.8"
+groups = ["dev"]
+files = [
+    {file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"},
+    {file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"},
+]
+
+[package.dependencies]
+platformdirs = ">=2.5"
+pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""}
+traitlets = ">=5.3"
+
+[package.extras]
+docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"]
+test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"]
+
+[[package]]
+name = "litellm"
+version = "1.61.9"
+description = "Library to easily interface with LLM API providers"
+optional = false
+python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8"
+groups = ["main"]
+files = [
+    {file = "litellm-1.61.9-py3-none-any.whl", hash = "sha256:b2ba755dc8bfbc095947cc2a548f08117ec29c9176d8f67b3a83eaf52776fbc2"},
+    {file = "litellm-1.61.9.tar.gz", hash = "sha256:792263ab0e40ce10e5bb05f789bbef4578a0caaf40b7a4fc1c373a6eabf9aa0d"},
+]
+
+[package.dependencies]
+aiohttp = "*"
+click = "*"
+httpx = ">=0.23.0"
+importlib-metadata = ">=6.8.0"
+jinja2 = ">=3.1.2,<4.0.0"
+jsonschema = ">=4.22.0,<5.0.0"
+openai = ">=1.61.0"
+pydantic = ">=2.0.0,<3.0.0"
+python-dotenv = ">=0.2.0"
+tiktoken = ">=0.7.0"
+tokenizers = "*"
+
+[package.extras]
+extra-proxy = ["azure-identity (>=1.15.0,<2.0.0)", "azure-keyvault-secrets (>=4.8.0,<5.0.0)", "google-cloud-kms (>=2.21.3,<3.0.0)", "prisma (==0.11.0)", "resend (>=0.8.0,<0.9.0)"]
+proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "backoff", "cryptography (>=43.0.1,<44.0.0)", "fastapi (>=0.115.5,<0.116.0)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=22.0.0,<23.0.0)", "orjson (>=3.9.7,<4.0.0)", "pynacl (>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rq", "uvicorn (>=0.29.0,<0.30.0)", "uvloop (>=0.21.0,<0.22.0)"]
+
+[[package]]
+name = "lxml"
+version = "5.3.1"
+description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "lxml-5.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a4058f16cee694577f7e4dd410263cd0ef75644b43802a689c2b3c2a7e69453b"},
+    {file = "lxml-5.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:364de8f57d6eda0c16dcfb999af902da31396949efa0e583e12675d09709881b"},
+    {file = "lxml-5.3.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:528f3a0498a8edc69af0559bdcf8a9f5a8bf7c00051a6ef3141fdcf27017bbf5"},
+    {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db4743e30d6f5f92b6d2b7c86b3ad250e0bad8dee4b7ad8a0c44bfb276af89a3"},
+    {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17b5d7f8acf809465086d498d62a981fa6a56d2718135bb0e4aa48c502055f5c"},
+    {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:928e75a7200a4c09e6efc7482a1337919cc61fe1ba289f297827a5b76d8969c2"},
+    {file = "lxml-5.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a997b784a639e05b9d4053ef3b20c7e447ea80814a762f25b8ed5a89d261eac"},
+    {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:7b82e67c5feb682dbb559c3e6b78355f234943053af61606af126df2183b9ef9"},
+    {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:f1de541a9893cf8a1b1db9bf0bf670a2decab42e3e82233d36a74eda7822b4c9"},
+    {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:de1fc314c3ad6bc2f6bd5b5a5b9357b8c6896333d27fdbb7049aea8bd5af2d79"},
+    {file = "lxml-5.3.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:7c0536bd9178f754b277a3e53f90f9c9454a3bd108b1531ffff720e082d824f2"},
+    {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:68018c4c67d7e89951a91fbd371e2e34cd8cfc71f0bb43b5332db38497025d51"},
+    {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aa826340a609d0c954ba52fd831f0fba2a4165659ab0ee1a15e4aac21f302406"},
+    {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:796520afa499732191e39fc95b56a3b07f95256f2d22b1c26e217fb69a9db5b5"},
+    {file = "lxml-5.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3effe081b3135237da6e4c4530ff2a868d3f80be0bda027e118a5971285d42d0"},
+    {file = "lxml-5.3.1-cp310-cp310-win32.whl", hash = "sha256:a22f66270bd6d0804b02cd49dae2b33d4341015545d17f8426f2c4e22f557a23"},
+    {file = "lxml-5.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:0bcfadea3cdc68e678d2b20cb16a16716887dd00a881e16f7d806c2138b8ff0c"},
+    {file = "lxml-5.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e220f7b3e8656ab063d2eb0cd536fafef396829cafe04cb314e734f87649058f"},
+    {file = "lxml-5.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f2cfae0688fd01f7056a17367e3b84f37c545fb447d7282cf2c242b16262607"},
+    {file = "lxml-5.3.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67d2f8ad9dcc3a9e826bdc7802ed541a44e124c29b7d95a679eeb58c1c14ade8"},
+    {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db0c742aad702fd5d0c6611a73f9602f20aec2007c102630c06d7633d9c8f09a"},
+    {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:198bb4b4dd888e8390afa4f170d4fa28467a7eaf857f1952589f16cfbb67af27"},
+    {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2a3e412ce1849be34b45922bfef03df32d1410a06d1cdeb793a343c2f1fd666"},
+    {file = "lxml-5.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8969dbc8d09d9cd2ae06362c3bad27d03f433252601ef658a49bd9f2b22d79"},
+    {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5be8f5e4044146a69c96077c7e08f0709c13a314aa5315981185c1f00235fe65"},
+    {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:133f3493253a00db2c870d3740bc458ebb7d937bd0a6a4f9328373e0db305709"},
+    {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:52d82b0d436edd6a1d22d94a344b9a58abd6c68c357ed44f22d4ba8179b37629"},
+    {file = "lxml-5.3.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b6f92e35e2658a5ed51c6634ceb5ddae32053182851d8cad2a5bc102a359b33"},
+    {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:203b1d3eaebd34277be06a3eb880050f18a4e4d60861efba4fb946e31071a295"},
+    {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:155e1a5693cf4b55af652f5c0f78ef36596c7f680ff3ec6eb4d7d85367259b2c"},
+    {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:22ec2b3c191f43ed21f9545e9df94c37c6b49a5af0a874008ddc9132d49a2d9c"},
+    {file = "lxml-5.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7eda194dd46e40ec745bf76795a7cccb02a6a41f445ad49d3cf66518b0bd9cff"},
+    {file = "lxml-5.3.1-cp311-cp311-win32.whl", hash = "sha256:fb7c61d4be18e930f75948705e9718618862e6fc2ed0d7159b2262be73f167a2"},
+    {file = "lxml-5.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:c809eef167bf4a57af4b03007004896f5c60bd38dc3852fcd97a26eae3d4c9e6"},
+    {file = "lxml-5.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e69add9b6b7b08c60d7ff0152c7c9a6c45b4a71a919be5abde6f98f1ea16421c"},
+    {file = "lxml-5.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4e52e1b148867b01c05e21837586ee307a01e793b94072d7c7b91d2c2da02ffe"},
+    {file = "lxml-5.3.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4b382e0e636ed54cd278791d93fe2c4f370772743f02bcbe431a160089025c9"},
+    {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2e49dc23a10a1296b04ca9db200c44d3eb32c8d8ec532e8c1fd24792276522a"},
+    {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4399b4226c4785575fb20998dc571bc48125dc92c367ce2602d0d70e0c455eb0"},
+    {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5412500e0dc5481b1ee9cf6b38bb3b473f6e411eb62b83dc9b62699c3b7b79f7"},
+    {file = "lxml-5.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c93ed3c998ea8472be98fb55aed65b5198740bfceaec07b2eba551e55b7b9ae"},
+    {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:63d57fc94eb0bbb4735e45517afc21ef262991d8758a8f2f05dd6e4174944519"},
+    {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:b450d7cabcd49aa7ab46a3c6aa3ac7e1593600a1a0605ba536ec0f1b99a04322"},
+    {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:4df0ec814b50275ad6a99bc82a38b59f90e10e47714ac9871e1b223895825468"},
+    {file = "lxml-5.3.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d184f85ad2bb1f261eac55cddfcf62a70dee89982c978e92b9a74a1bfef2e367"},
+    {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b725e70d15906d24615201e650d5b0388b08a5187a55f119f25874d0103f90dd"},
+    {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a31fa7536ec1fb7155a0cd3a4e3d956c835ad0a43e3610ca32384d01f079ea1c"},
+    {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3c3c8b55c7fc7b7e8877b9366568cc73d68b82da7fe33d8b98527b73857a225f"},
+    {file = "lxml-5.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d61ec60945d694df806a9aec88e8f29a27293c6e424f8ff91c80416e3c617645"},
+    {file = "lxml-5.3.1-cp312-cp312-win32.whl", hash = "sha256:f4eac0584cdc3285ef2e74eee1513a6001681fd9753b259e8159421ed28a72e5"},
+    {file = "lxml-5.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:29bfc8d3d88e56ea0a27e7c4897b642706840247f59f4377d81be8f32aa0cfbf"},
+    {file = "lxml-5.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c093c7088b40d8266f57ed71d93112bd64c6724d31f0794c1e52cc4857c28e0e"},
+    {file = "lxml-5.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b0884e3f22d87c30694e625b1e62e6f30d39782c806287450d9dc2fdf07692fd"},
+    {file = "lxml-5.3.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1637fa31ec682cd5760092adfabe86d9b718a75d43e65e211d5931809bc111e7"},
+    {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a364e8e944d92dcbf33b6b494d4e0fb3499dcc3bd9485beb701aa4b4201fa414"},
+    {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:779e851fd0e19795ccc8a9bb4d705d6baa0ef475329fe44a13cf1e962f18ff1e"},
+    {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c4393600915c308e546dc7003d74371744234e8444a28622d76fe19b98fa59d1"},
+    {file = "lxml-5.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:673b9d8e780f455091200bba8534d5f4f465944cbdd61f31dc832d70e29064a5"},
+    {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:2e4a570f6a99e96c457f7bec5ad459c9c420ee80b99eb04cbfcfe3fc18ec6423"},
+    {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:71f31eda4e370f46af42fc9f264fafa1b09f46ba07bdbee98f25689a04b81c20"},
+    {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:42978a68d3825eaac55399eb37a4d52012a205c0c6262199b8b44fcc6fd686e8"},
+    {file = "lxml-5.3.1-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:8b1942b3e4ed9ed551ed3083a2e6e0772de1e5e3aca872d955e2e86385fb7ff9"},
+    {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:85c4f11be9cf08917ac2a5a8b6e1ef63b2f8e3799cec194417e76826e5f1de9c"},
+    {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:231cf4d140b22a923b1d0a0a4e0b4f972e5893efcdec188934cc65888fd0227b"},
+    {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5865b270b420eda7b68928d70bb517ccbe045e53b1a428129bb44372bf3d7dd5"},
+    {file = "lxml-5.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:dbf7bebc2275016cddf3c997bf8a0f7044160714c64a9b83975670a04e6d2252"},
+    {file = "lxml-5.3.1-cp313-cp313-win32.whl", hash = "sha256:d0751528b97d2b19a388b302be2a0ee05817097bab46ff0ed76feeec24951f78"},
+    {file = "lxml-5.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:91fb6a43d72b4f8863d21f347a9163eecbf36e76e2f51068d59cd004c506f332"},
+    {file = "lxml-5.3.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:016b96c58e9a4528219bb563acf1aaaa8bc5452e7651004894a973f03b84ba81"},
+    {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82a4bb10b0beef1434fb23a09f001ab5ca87895596b4581fd53f1e5145a8934a"},
+    {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d68eeef7b4d08a25e51897dac29bcb62aba830e9ac6c4e3297ee7c6a0cf6439"},
+    {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:f12582b8d3b4c6be1d298c49cb7ae64a3a73efaf4c2ab4e37db182e3545815ac"},
+    {file = "lxml-5.3.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2df7ed5edeb6bd5590914cd61df76eb6cce9d590ed04ec7c183cf5509f73530d"},
+    {file = "lxml-5.3.1-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:585c4dc429deebc4307187d2b71ebe914843185ae16a4d582ee030e6cfbb4d8a"},
+    {file = "lxml-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:06a20d607a86fccab2fc15a77aa445f2bdef7b49ec0520a842c5c5afd8381576"},
+    {file = "lxml-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:057e30d0012439bc54ca427a83d458752ccda725c1c161cc283db07bcad43cf9"},
+    {file = "lxml-5.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4867361c049761a56bd21de507cab2c2a608c55102311d142ade7dab67b34f32"},
+    {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3dddf0fb832486cc1ea71d189cb92eb887826e8deebe128884e15020bb6e3f61"},
+    {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bcc211542f7af6f2dfb705f5f8b74e865592778e6cafdfd19c792c244ccce19"},
+    {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaca5a812f050ab55426c32177091130b1e49329b3f002a32934cd0245571307"},
+    {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:236610b77589faf462337b3305a1be91756c8abc5a45ff7ca8f245a71c5dab70"},
+    {file = "lxml-5.3.1-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:aed57b541b589fa05ac248f4cb1c46cbb432ab82cbd467d1c4f6a2bdc18aecf9"},
+    {file = "lxml-5.3.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:75fa3d6946d317ffc7016a6fcc44f42db6d514b7fdb8b4b28cbe058303cb6e53"},
+    {file = "lxml-5.3.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:96eef5b9f336f623ffc555ab47a775495e7e8846dde88de5f941e2906453a1ce"},
+    {file = "lxml-5.3.1-cp37-cp37m-win32.whl", hash = "sha256:ef45f31aec9be01379fc6c10f1d9c677f032f2bac9383c827d44f620e8a88407"},
+    {file = "lxml-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0611da6b07dd3720f492db1b463a4d1175b096b49438761cc9f35f0d9eaaef5"},
+    {file = "lxml-5.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2aca14c235c7a08558fe0a4786a1a05873a01e86b474dfa8f6df49101853a4e"},
+    {file = "lxml-5.3.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae82fce1d964f065c32c9517309f0c7be588772352d2f40b1574a214bd6e6098"},
+    {file = "lxml-5.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7aae7a3d63b935babfdc6864b31196afd5145878ddd22f5200729006366bc4d5"},
+    {file = "lxml-5.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8e0d177b1fe251c3b1b914ab64135475c5273c8cfd2857964b2e3bb0fe196a7"},
+    {file = "lxml-5.3.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:6c4dd3bfd0c82400060896717dd261137398edb7e524527438c54a8c34f736bf"},
+    {file = "lxml-5.3.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f1208c1c67ec9e151d78aa3435aa9b08a488b53d9cfac9b699f15255a3461ef2"},
+    {file = "lxml-5.3.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c6aacf00d05b38a5069826e50ae72751cb5bc27bdc4d5746203988e429b385bb"},
+    {file = "lxml-5.3.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:5881aaa4bf3a2d086c5f20371d3a5856199a0d8ac72dd8d0dbd7a2ecfc26ab73"},
+    {file = "lxml-5.3.1-cp38-cp38-win32.whl", hash = "sha256:45fbb70ccbc8683f2fb58bea89498a7274af1d9ec7995e9f4af5604e028233fc"},
+    {file = "lxml-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:7512b4d0fc5339d5abbb14d1843f70499cab90d0b864f790e73f780f041615d7"},
+    {file = "lxml-5.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5885bc586f1edb48e5d68e7a4b4757b5feb2a496b64f462b4d65950f5af3364f"},
+    {file = "lxml-5.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1b92fe86e04f680b848fff594a908edfa72b31bfc3499ef7433790c11d4c8cd8"},
+    {file = "lxml-5.3.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a091026c3bf7519ab1e64655a3f52a59ad4a4e019a6f830c24d6430695b1cf6a"},
+    {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ffb141361108e864ab5f1813f66e4e1164181227f9b1f105b042729b6c15125"},
+    {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3715cdf0dd31b836433af9ee9197af10e3df41d273c19bb249230043667a5dfd"},
+    {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88b72eb7222d918c967202024812c2bfb4048deeb69ca328363fb8e15254c549"},
+    {file = "lxml-5.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa59974880ab5ad8ef3afaa26f9bda148c5f39e06b11a8ada4660ecc9fb2feb3"},
+    {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3bb8149840daf2c3f97cebf00e4ed4a65a0baff888bf2605a8d0135ff5cf764e"},
+    {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:0d6b2fa86becfa81f0a0271ccb9eb127ad45fb597733a77b92e8a35e53414914"},
+    {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:136bf638d92848a939fd8f0e06fcf92d9f2e4b57969d94faae27c55f3d85c05b"},
+    {file = "lxml-5.3.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:89934f9f791566e54c1d92cdc8f8fd0009447a5ecdb1ec6b810d5f8c4955f6be"},
+    {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a8ade0363f776f87f982572c2860cc43c65ace208db49c76df0a21dde4ddd16e"},
+    {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:bfbbab9316330cf81656fed435311386610f78b6c93cc5db4bebbce8dd146675"},
+    {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:172d65f7c72a35a6879217bcdb4bb11bc88d55fb4879e7569f55616062d387c2"},
+    {file = "lxml-5.3.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e3c623923967f3e5961d272718655946e5322b8d058e094764180cdee7bab1af"},
+    {file = "lxml-5.3.1-cp39-cp39-win32.whl", hash = "sha256:ce0930a963ff593e8bb6fda49a503911accc67dee7e5445eec972668e672a0f0"},
+    {file = "lxml-5.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:f7b64fcd670bca8800bc10ced36620c6bbb321e7bc1214b9c0c0df269c1dddc2"},
+    {file = "lxml-5.3.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:afa578b6524ff85fb365f454cf61683771d0170470c48ad9d170c48075f86725"},
+    {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f5e80adf0aafc7b5454f2c1cb0cde920c9b1f2cbd0485f07cc1d0497c35c5d"},
+    {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dd0b80ac2d8f13ffc906123a6f20b459cb50a99222d0da492360512f3e50f84"},
+    {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:422c179022ecdedbe58b0e242607198580804253da220e9454ffe848daa1cfd2"},
+    {file = "lxml-5.3.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:524ccfded8989a6595dbdda80d779fb977dbc9a7bc458864fc9a0c2fc15dc877"},
+    {file = "lxml-5.3.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:48fd46bf7155def2e15287c6f2b133a2f78e2d22cdf55647269977b873c65499"},
+    {file = "lxml-5.3.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:05123fad495a429f123307ac6d8fd6f977b71e9a0b6d9aeeb8f80c017cb17131"},
+    {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a243132767150a44e6a93cd1dde41010036e1cbc63cc3e9fe1712b277d926ce3"},
+    {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c92ea6d9dd84a750b2bae72ff5e8cf5fdd13e58dda79c33e057862c29a8d5b50"},
+    {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2f1be45d4c15f237209bbf123a0e05b5d630c8717c42f59f31ea9eae2ad89394"},
+    {file = "lxml-5.3.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a83d3adea1e0ee36dac34627f78ddd7f093bb9cfc0a8e97f1572a949b695cb98"},
+    {file = "lxml-5.3.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:3edbb9c9130bac05d8c3fe150c51c337a471cc7fdb6d2a0a7d3a88e88a829314"},
+    {file = "lxml-5.3.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2f23cf50eccb3255b6e913188291af0150d89dab44137a69e14e4dcb7be981f1"},
+    {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df7e5edac4778127f2bf452e0721a58a1cfa4d1d9eac63bdd650535eb8543615"},
+    {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:094b28ed8a8a072b9e9e2113a81fda668d2053f2ca9f2d202c2c8c7c2d6516b1"},
+    {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:514fe78fc4b87e7a7601c92492210b20a1b0c6ab20e71e81307d9c2e377c64de"},
+    {file = "lxml-5.3.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8fffc08de02071c37865a155e5ea5fce0282e1546fd5bde7f6149fcaa32558ac"},
+    {file = "lxml-5.3.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4b0d5cdba1b655d5b18042ac9c9ff50bda33568eb80feaaca4fc237b9c4fbfde"},
+    {file = "lxml-5.3.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3031e4c16b59424e8d78522c69b062d301d951dc55ad8685736c3335a97fc270"},
+    {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb659702a45136c743bc130760c6f137870d4df3a9e14386478b8a0511abcfca"},
+    {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a11b16a33656ffc43c92a5343a28dc71eefe460bcc2a4923a96f292692709f6"},
+    {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5ae125276f254b01daa73e2c103363d3e99e3e10505686ac7d9d2442dd4627a"},
+    {file = "lxml-5.3.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c76722b5ed4a31ba103e0dc77ab869222ec36efe1a614e42e9bcea88a36186fe"},
+    {file = "lxml-5.3.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:33e06717c00c788ab4e79bc4726ecc50c54b9bfb55355eae21473c145d83c2d2"},
+    {file = "lxml-5.3.1.tar.gz", hash = "sha256:106b7b5d2977b339f1e97efe2778e2ab20e99994cbb0ec5e55771ed0795920c8"},
+]
+
+[package.extras]
+cssselect = ["cssselect (>=0.7)"]
+html-clean = ["lxml_html_clean"]
+html5 = ["html5lib"]
+htmlsoup = ["BeautifulSoup4"]
+source = ["Cython (>=3.0.11,<3.1.0)"]
+
+[[package]]
+name = "markupsafe"
+version = "3.0.2"
+description = "Safely add untrusted strings to HTML/XML markup."
+optional = false
+python-versions = ">=3.9"
+groups = ["main", "dev"]
+files = [
+    {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"},
+    {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"},
+    {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"},
+    {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"},
+    {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"},
+    {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"},
+    {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"},
+    {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"},
+    {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"},
+    {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"},
+    {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"},
+    {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"},
+    {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"},
+    {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"},
+    {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"},
+    {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"},
+    {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"},
+    {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"},
+    {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"},
+    {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"},
+    {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"},
+    {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"},
+    {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"},
+    {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"},
+    {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"},
+    {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"},
+    {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"},
+    {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"},
+    {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"},
+    {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"},
+    {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"},
+    {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"},
+]
+
+[[package]]
+name = "mccabe"
+version = "0.7.0"
+description = "McCabe checker, plugin for flake8"
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+    {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
+]
+
+[[package]]
+name = "mcp"
+version = "1.2.1"
+description = "Model Context Protocol SDK"
+optional = false
+python-versions = ">=3.10"
+groups = ["main"]
+files = [
+    {file = "mcp-1.2.1-py3-none-any.whl", hash = "sha256:579bf9c9157850ebb1344f3ca6f7a3021b0123c44c9f089ef577a7062522f0fd"},
+    {file = "mcp-1.2.1.tar.gz", hash = "sha256:c9d43dbfe943aa1530e2be8f54b73af3ebfb071243827b4483d421684806cb45"},
+]
+
+[package.dependencies]
+anyio = ">=4.5"
+httpx = ">=0.27"
+httpx-sse = ">=0.4"
+pydantic = ">=2.10.1,<3.0.0"
+pydantic-settings = ">=2.6.1"
+sse-starlette = ">=1.6.1"
+starlette = ">=0.27"
+uvicorn = ">=0.30"
+
+[package.extras]
+cli = ["python-dotenv (>=1.0.0)", "typer (>=0.12.4)"]
+rich = ["rich (>=13.9.4)"]
+
+[[package]]
+name = "multidict"
+version = "6.1.0"
+description = "multidict implementation"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"},
+    {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"},
+    {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"},
+    {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"},
+    {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"},
+    {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"},
+    {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"},
+    {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"},
+    {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"},
+    {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"},
+    {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"},
+    {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"},
+    {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"},
+    {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"},
+    {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"},
+    {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"},
+    {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"},
+    {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"},
+    {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"},
+    {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"},
+    {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"},
+    {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"},
+    {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"},
+    {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"},
+    {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"},
+    {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"},
+    {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"},
+    {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"},
+    {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"},
+    {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"},
+    {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"},
+    {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"},
+    {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"},
+    {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"},
+    {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"},
+    {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"},
+    {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"},
+    {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"},
+    {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"},
+    {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"},
+    {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"},
+    {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"},
+    {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"},
+    {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"},
+    {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"},
+    {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"},
+    {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"},
+    {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"},
+    {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"},
+    {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"},
+    {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"},
+    {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"},
+    {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"},
+    {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"},
+    {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"},
+    {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"},
+    {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"},
+    {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"},
+    {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"},
+    {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"},
+    {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"},
+    {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"},
+    {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"},
+    {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"},
+    {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"},
+    {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"},
+    {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"},
+    {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"},
+    {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"},
+    {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"},
+    {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"},
+    {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"},
+    {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"},
+    {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"},
+    {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"},
+    {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"},
+    {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"},
+    {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"},
+    {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"},
+    {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"},
+    {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"},
+    {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"},
+    {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"},
+    {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"},
+    {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"},
+    {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"},
+    {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"},
+    {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"},
+    {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"},
+    {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"},
+    {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"},
+    {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"},
+]
+
+[[package]]
+name = "mypy"
+version = "1.15.0"
+description = "Optional static typing for Python"
+optional = false
+python-versions = ">=3.9"
+groups = ["dev"]
+files = [
+    {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"},
+    {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"},
+    {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"},
+    {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"},
+    {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"},
+    {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"},
+    {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"},
+    {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"},
+    {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"},
+    {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"},
+    {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"},
+    {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"},
+    {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"},
+    {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"},
+    {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"},
+    {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"},
+    {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"},
+    {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"},
+    {file = "mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"},
+    {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"},
+    {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"},
+    {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"},
+    {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"},
+    {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"},
+    {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"},
+    {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"},
+    {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"},
+    {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"},
+    {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"},
+    {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"},
+    {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"},
+    {file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"},
+]
+
+[package.dependencies]
+mypy_extensions = ">=1.0.0"
+typing_extensions = ">=4.6.0"
+
+[package.extras]
+dmypy = ["psutil (>=4.0)"]
+faster-cache = ["orjson"]
+install-types = ["pip"]
+mypyc = ["setuptools (>=50)"]
+reports = ["lxml"]
+
+[[package]]
+name = "mypy-extensions"
+version = "1.0.0"
+description = "Type system extensions for programs checked with the mypy type checker."
+optional = false +python-versions = ">=3.5" +groups = ["dev"] +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nbformat" +version = "5.10.4" +description = "The Jupyter Notebook format" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b"}, + {file = "nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a"}, +] + +[package.dependencies] +fastjsonschema = ">=2.15" +jsonschema = ">=2.6" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +traitlets = ">=5.1" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["pep440", "pre-commit", "pytest", "testpath"] + +[[package]] +name = "nbstripout" +version = "0.8.1" +description = "Strips outputs from Jupyter and IPython notebooks" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "nbstripout-0.8.1-py2.py3-none-any.whl", hash = "sha256:79a8c8da488d98c54c112fa87185045f0271a97d84f1d46918d6a3ee561b30e7"}, + {file = "nbstripout-0.8.1.tar.gz", hash = "sha256:eaac8b6b4e729e8dfe1e5df2c0f8ba44abc5a17a65448f0480141f80be230bb1"}, +] + +[package.dependencies] +nbformat = "*" + +[[package]] +name = "openai" +version = "1.63.2" +description = "The official Python library for the openai API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "openai-1.63.2-py3-none-any.whl", hash = "sha256:1f38b27b5a40814c2b7d8759ec78110df58c4a614c25f182809ca52b080ff4d4"}, + {file = "openai-1.63.2.tar.gz", hash = "sha256:aeabeec984a7d2957b4928ceaa339e2ead19c61cfcf35ae62b7c363368d26360"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.11,<5" + +[package.extras] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] +realtime = ["websockets (>=13,<15)"] + +[[package]] +name = "packaging" +version = "24.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, +] + +[[package]] +name = "pastel" +version = "0.2.1" +description = "Bring colors to your terminal." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["dev"] +files = [ + {file = "pastel-0.2.1-py2.py3-none-any.whl", hash = "sha256:4349225fcdf6c2bb34d483e523475de5bb04a5c10ef711263452cb37d7dd4364"}, + {file = "pastel-0.2.1.tar.gz", hash = "sha256:e6581ac04e973cac858828c6202c1e1e81fee1dc7de7683f3e1ffe0bfd8a573d"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "poethepoet" +version = "0.32.2" +description = "A task runner that works well with poetry." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "poethepoet-0.32.2-py3-none-any.whl", hash = "sha256:97e165de8e00b07d33fd8d72896fad8b20ccafcd327b1118bb6a3da26af38d33"}, + {file = "poethepoet-0.32.2.tar.gz", hash = "sha256:1d68871dac1b191e27bd68fea57d0e01e9afbba3fcd01dbe6f6bc3fcb071fe4c"}, +] + +[package.dependencies] +pastel = ">=0.2.1,<0.3.0" +pyyaml = ">=6.0.2,<7.0" + +[package.extras] +poetry-plugin = ["poetry (>=1.2.0,<3.0.0) ; python_version < \"4.0\""] + +[[package]] +name = "prompt-toolkit" +version = "3.0.50" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.8.0" +groups = ["dev"] +files = [ + {file = "prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198"}, + {file = "prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "propcache" +version = "0.2.1" +description = "Accelerated property cache" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "propcache-0.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6b3f39a85d671436ee3d12c017f8fdea38509e4f25b28eb25877293c98c243f6"}, + {file = "propcache-0.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d51fbe4285d5db5d92a929e3e21536ea3dd43732c5b177c7ef03f918dff9f2"}, + {file = "propcache-0.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6445804cf4ec763dc70de65a3b0d9954e868609e83850a47ca4f0cb64bd79fea"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9479aa06a793c5aeba49ce5c5692ffb51fcd9a7016e017d555d5e2b0045d212"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9631c5e8b5b3a0fda99cb0d29c18133bca1e18aea9effe55adb3da1adef80d3"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3156628250f46a0895f1f36e1d4fbe062a1af8718ec3ebeb746f1d23f0c5dc4d"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6b6fb63ae352e13748289f04f37868099e69dba4c2b3e271c46061e82c745634"}, + {file = "propcache-0.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:887d9b0a65404929641a9fabb6452b07fe4572b269d901d622d8a34a4e9043b2"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a96dc1fa45bd8c407a0af03b2d5218392729e1822b0c32e62c5bf7eeb5fb3958"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a7e65eb5c003a303b94aa2c3852ef130230ec79e349632d030e9571b87c4698c"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:999779addc413181912e984b942fbcc951be1f5b3663cd80b2687758f434c583"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:19a0f89a7bb9d8048d9c4370c9c543c396e894c76be5525f5e1ad287f1750ddf"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1ac2f5fe02fa75f56e1ad473f1175e11f475606ec9bd0be2e78e4734ad575034"}, + {file = "propcache-0.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:574faa3b79e8ebac7cb1d7930f51184ba1ccf69adfdec53a12f319a06030a68b"}, + {file = "propcache-0.2.1-cp310-cp310-win32.whl", hash = "sha256:03ff9d3f665769b2a85e6157ac8b439644f2d7fd17615a82fa55739bc97863f4"}, + {file = "propcache-0.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:2d3af2e79991102678f53e0dbf4c35de99b6b8b58f29a27ca0325816364caaba"}, + {file = "propcache-0.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1ffc3cca89bb438fb9c95c13fc874012f7b9466b89328c3c8b1aa93cdcfadd16"}, + {file = "propcache-0.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f174bbd484294ed9fdf09437f889f95807e5f229d5d93588d34e92106fbf6717"}, + {file = "propcache-0.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:70693319e0b8fd35dd863e3e29513875eb15c51945bf32519ef52927ca883bc3"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b480c6a4e1138e1aa137c0079b9b6305ec6dcc1098a8ca5196283e8a49df95a9"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d27b84d5880f6d8aa9ae3edb253c59d9f6642ffbb2c889b78b60361eed449787"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:857112b22acd417c40fa4595db2fe28ab900c8c5fe4670c7989b1c0230955465"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf6c4150f8c0e32d241436526f3c3f9cbd34429492abddbada2ffcff506c51af"}, + {file = "propcache-0.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66d4cfda1d8ed687daa4bc0274fcfd5267873db9a5bc0418c2da19273040eeb7"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c2f992c07c0fca81655066705beae35fc95a2fa7366467366db627d9f2ee097f"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:4a571d97dbe66ef38e472703067021b1467025ec85707d57e78711c085984e54"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bb6178c241278d5fe853b3de743087be7f5f4c6f7d6d22a3b524d323eecec505"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ad1af54a62ffe39cf34db1aa6ed1a1873bd548f6401db39d8e7cd060b9211f82"}, + {file = "propcache-0.2.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e7048abd75fe40712005bcfc06bb44b9dfcd8e101dda2ecf2f5aa46115ad07ca"}, + {file = 
"propcache-0.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:160291c60081f23ee43d44b08a7e5fb76681221a8e10b3139618c5a9a291b84e"}, + {file = "propcache-0.2.1-cp311-cp311-win32.whl", hash = "sha256:819ce3b883b7576ca28da3861c7e1a88afd08cc8c96908e08a3f4dd64a228034"}, + {file = "propcache-0.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:edc9fc7051e3350643ad929df55c451899bb9ae6d24998a949d2e4c87fb596d3"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:081a430aa8d5e8876c6909b67bd2d937bfd531b0382d3fdedb82612c618bc41a"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2ccec9ac47cf4e04897619c0e0c1a48c54a71bdf045117d3a26f80d38ab1fb0"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:14d86fe14b7e04fa306e0c43cdbeebe6b2c2156a0c9ce56b815faacc193e320d"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:049324ee97bb67285b49632132db351b41e77833678432be52bdd0289c0e05e4"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cd9a1d071158de1cc1c71a26014dcdfa7dd3d5f4f88c298c7f90ad6f27bb46d"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98110aa363f1bb4c073e8dcfaefd3a5cea0f0834c2aab23dda657e4dab2f53b5"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:647894f5ae99c4cf6bb82a1bb3a796f6e06af3caa3d32e26d2350d0e3e3faf24"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfd3223c15bebe26518d58ccf9a39b93948d3dcb3e57a20480dfdd315356baff"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d71264a80f3fcf512eb4f18f59423fe82d6e346ee97b90625f283df56aee103f"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e73091191e4280403bde6c9a52a6999d69cdfde498f1fdf629105247599b57ec"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3935bfa5fede35fb202c4b569bb9c042f337ca4ff7bd540a0aa5e37131659348"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f508b0491767bb1f2b87fdfacaba5f7eddc2f867740ec69ece6d1946d29029a6"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:1672137af7c46662a1c2be1e8dc78cb6d224319aaa40271c9257d886be4363a6"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b74c261802d3d2b85c9df2dfb2fa81b6f90deeef63c2db9f0e029a3cac50b518"}, + {file = "propcache-0.2.1-cp312-cp312-win32.whl", hash = "sha256:d09c333d36c1409d56a9d29b3a1b800a42c76a57a5a8907eacdbce3f18768246"}, + {file = "propcache-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:c214999039d4f2a5b2073ac506bba279945233da8c786e490d411dfc30f855c1"}, + {file = "propcache-0.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aca405706e0b0a44cc6bfd41fbe89919a6a56999157f6de7e182a990c36e37bc"}, + {file = "propcache-0.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:12d1083f001ace206fe34b6bdc2cb94be66d57a850866f0b908972f90996b3e9"}, + {file = "propcache-0.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d93f3307ad32a27bda2e88ec81134b823c240aa3abb55821a8da553eed8d9439"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba278acf14471d36316159c94a802933d10b6a1e117b8554fe0d0d9b75c9d536"}, + {file = 
"propcache-0.2.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4e6281aedfca15301c41f74d7005e6e3f4ca143584ba696ac69df4f02f40d629"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b750a8e5a1262434fb1517ddf64b5de58327f1adc3524a5e44c2ca43305eb0b"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf72af5e0fb40e9babf594308911436c8efde3cb5e75b6f206c34ad18be5c052"}, + {file = "propcache-0.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2d0a12018b04f4cb820781ec0dffb5f7c7c1d2a5cd22bff7fb055a2cb19ebce"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e800776a79a5aabdb17dcc2346a7d66d0777e942e4cd251defeb084762ecd17d"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:4160d9283bd382fa6c0c2b5e017acc95bc183570cd70968b9202ad6d8fc48dce"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:30b43e74f1359353341a7adb783c8f1b1c676367b011709f466f42fda2045e95"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:58791550b27d5488b1bb52bc96328456095d96206a250d28d874fafe11b3dfaf"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:0f022d381747f0dfe27e99d928e31bc51a18b65bb9e481ae0af1380a6725dd1f"}, + {file = "propcache-0.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:297878dc9d0a334358f9b608b56d02e72899f3b8499fc6044133f0d319e2ec30"}, + {file = "propcache-0.2.1-cp313-cp313-win32.whl", hash = "sha256:ddfab44e4489bd79bda09d84c430677fc7f0a4939a73d2bba3073036f487a0a6"}, + {file = "propcache-0.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:556fc6c10989f19a179e4321e5d678db8eb2924131e64652a51fe83e4c3db0e1"}, + {file = "propcache-0.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6a9a8c34fb7bb609419a211e59da8887eeca40d300b5ea8e56af98f6fbbb1541"}, + {file = "propcache-0.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae1aa1cd222c6d205853b3013c69cd04515f9d6ab6de4b0603e2e1c33221303e"}, + {file = "propcache-0.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:accb6150ce61c9c4b7738d45550806aa2b71c7668c6942f17b0ac182b6142fd4"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eee736daafa7af6d0a2dc15cc75e05c64f37fc37bafef2e00d77c14171c2097"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7a31fc1e1bd362874863fdeed71aed92d348f5336fd84f2197ba40c59f061bd"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba4cfa1052819d16699e1d55d18c92b6e094d4517c41dd231a8b9f87b6fa681"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f089118d584e859c62b3da0892b88a83d611c2033ac410e929cb6754eec0ed16"}, + {file = "propcache-0.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:781e65134efaf88feb447e8c97a51772aa75e48b794352f94cb7ea717dedda0d"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31f5af773530fd3c658b32b6bdc2d0838543de70eb9a2156c03e410f7b0d3aae"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:a7a078f5d37bee6690959c813977da5291b24286e7b962e62a94cec31aa5188b"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:cea7daf9fc7ae6687cf1e2c049752f19f146fdc37c2cc376e7d0032cf4f25347"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:8b3489ff1ed1e8315674d0775dc7d2195fb13ca17b3808721b54dbe9fd020faf"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9403db39be1393618dd80c746cb22ccda168efce239c73af13c3763ef56ffc04"}, + {file = "propcache-0.2.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5d97151bc92d2b2578ff7ce779cdb9174337390a535953cbb9452fb65164c587"}, + {file = "propcache-0.2.1-cp39-cp39-win32.whl", hash = "sha256:9caac6b54914bdf41bcc91e7eb9147d331d29235a7c967c150ef5df6464fd1bb"}, + {file = "propcache-0.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:92fc4500fcb33899b05ba73276dfb684a20d31caa567b7cb5252d48f896a91b1"}, + {file = "propcache-0.2.1-py3-none-any.whl", hash = "sha256:52277518d6aae65536e9cea52d4e7fd2f7a66f4aa2d30ed3f2fcea620ace3c54"}, + {file = "propcache-0.2.1.tar.gz", hash = "sha256:3f77ce728b19cb537714499928fe800c3dda29e8d9428778fc7c186da4c09a64"}, +] + +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "platform_python_implementation != \"CPython\"" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + +[[package]] +name = "pydantic" +version = "2.10.6" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, + {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.27.2" +typing-extensions = ">=4.12.2" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = 
"pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = 
"sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, 
+ {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pydantic-settings" +version = "2.7.1" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pydantic_settings-2.7.1-py3-none-any.whl", hash = "sha256:590be9e6e24d06db33a4262829edef682500ef008565a969c73d39d5f8bfb3fd"}, + {file = "pydantic_settings-2.7.1.tar.gz", hash = "sha256:10c9caad35e64bfb3c2fbf70a078c0e25cc92499782e5200747f942a065dec93"}, +] + +[package.dependencies] +pydantic = ">=2.7.0" +python-dotenv = ">=0.21.0" + +[package.extras] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + +[[package]] +name = "pylint" +version = "3.3.4" +description = "python code static checker" +optional = false +python-versions = ">=3.9.0" +groups = ["main"] +files = [ + {file = "pylint-3.3.4-py3-none-any.whl", hash = "sha256:289e6a1eb27b453b08436478391a48cd53bb0efb824873f949e709350f3de018"}, + {file = "pylint-3.3.4.tar.gz", hash = "sha256:74ae7a38b177e69a9b525d0794bd8183820bfa7eb68cc1bee6e8ed22a42be4ce"}, +] + +[package.dependencies] +astroid = ">=3.3.8,<=3.4.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version == \"3.11\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<7" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomlkit = ">=0.10.1" + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + +[[package]] +name = "pyproject-api" +version = "1.9.0" +description = "API to interact with the python pyproject.toml based projects" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pyproject_api-1.9.0-py3-none-any.whl", hash = "sha256:326df9d68dea22d9d98b5243c46e3ca3161b07a1b9b18e213d1e24fd0e605766"}, + {file = "pyproject_api-1.9.0.tar.gz", hash = "sha256:7e8a9854b2dfb49454fae421cb86af43efbb2b2454e5646ffb7623540321ae6e"}, +] + +[package.dependencies] +packaging = ">=24.2" + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx-autodoc-typehints (>=3)"] +testing = ["covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "setuptools (>=75.8)"] + +[[package]] +name = "pytest" +version = "8.3.4" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, + {file = 
"pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.25.3" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest_asyncio-0.25.3-py3-none-any.whl", hash = "sha256:9e89518e0f9bd08928f97a3482fdc4e244df17529460bc038291ccaf8f85c7c3"}, + {file = "pytest_asyncio-0.25.3.tar.gz", hash = "sha256:fc1da2cf9f125ada7e710b4ddad05518d4cee187ae9412e9ac9271003497f07a"}, +] + +[package.dependencies] +pytest = ">=8.2,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "pytest-cov" +version = "6.0.0" +description = "Pytest plugin for measuring coverage." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0"}, + {file = "pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35"}, +] + +[package.dependencies] +coverage = {version = ">=7.5", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pyventus" +version = "0.6.0" +description = "A powerful Python package for event-driven programming; define, emit, and orchestrate events with ease." 
+optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "pyventus-0.6.0-py3-none-any.whl", hash = "sha256:fab55c04cc1c08c57af60116eb54e309c38245c0a4ef70d19b54f6c6bd44dafa"}, + {file = "pyventus-0.6.0.tar.gz", hash = "sha256:1950f8ac7a6591fe3a36c66a79fdceea03ec34f9ad064b2e7e66120c8031cc6a"}, +] + +[package.extras] +all = ["celery (>=5.3.5)", "fastapi (>=0.95.2)", "rq (>=1.15.0)"] +celery = ["celery (>=5.3.5)"] +dev = ["black (>=23.12.0)", "celery (>=5.3.5)", "coverage[toml] (>=7.3.3)", "fakeredis (>=2.20.0)", "fastapi (>=0.95.2)", "hatch (>=1.8.1)", "httpx (>=0.23.0)", "mike (>=2.1.3)", "mkdocs-git-committers-plugin-2 (>=2.3.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2.1)", "mkdocs-material (>=9.5.17)", "mkdocs-material[imaging]", "mkdocstrings[python] (>=0.24.0)", "mypy (>=1.7.1)", "pytest (>=7.4.0)", "pytest-asyncio (>=0.21.0)", "rq (>=1.15.0)"] +docs = ["mike (>=2.1.3)", "mkdocs-git-committers-plugin-2 (>=2.3.0)", "mkdocs-git-revision-date-localized-plugin (>=1.2.1)", "mkdocs-material (>=9.5.17)", "mkdocs-material[imaging]", "mkdocstrings[python] (>=0.24.0)"] +fastapi = ["fastapi (>=0.95.2)"] +rq = ["rq (>=1.15.0)"] +tests = ["black (>=23.12.0)", "celery (>=5.3.5)", "coverage[toml] (>=7.3.3)", "fakeredis (>=2.20.0)", "fastapi (>=0.95.2)", "httpx (>=0.23.0)", "mypy (>=1.7.1)", "pytest (>=7.4.0)", "pytest-asyncio (>=0.21.0)", "rq (>=1.15.0)"] + +[[package]] +name = "pywin32" +version = "308" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +groups = ["dev"] +markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\"" +files = [ + {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, + {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, + {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, + {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, + {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, + {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, + {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, + {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, + {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, + {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, + {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, + {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, + {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, + {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, + {file = 
"pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, + {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, + {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, + {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = 
"PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = 
"sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "questionary" +version = "2.1.0" +description = "Python library to build pretty command line user prompts โญ๏ธ" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "questionary-2.1.0-py3-none-any.whl", hash = "sha256:44174d237b68bc828e4878c763a9ad6790ee61990e0ae72927694ead57bab8ec"}, + {file = "questionary-2.1.0.tar.gz", hash = "sha256:6302cdd645b19667d8f6e6634774e9538bfcd1aad9be287e743d96cacaf95587"}, +] + +[package.dependencies] +prompt_toolkit = ">=2.0,<4.0" + +[[package]] +name = "referencing" +version = "0.36.2" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"}, + {file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" +typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""} + +[[package]] +name = "regex" +version = "2024.11.6" +description = "Alternative regular expression module, to replace re." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, + {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"}, + {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"}, + {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"}, + {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"}, + {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"}, + {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"}, + {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = 
"regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"}, + {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"}, + {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"}, + {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"}, + {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"}, + {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"}, + {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"}, + {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"}, + {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"}, + {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"}, + {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"}, + {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"}, + {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"}, + {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, +] + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rpds-py" +version = "0.22.3" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "rpds_py-0.22.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6c7b99ca52c2c1752b544e310101b98a659b720b21db00e65edca34483259967"}, + {file = "rpds_py-0.22.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be2eb3f2495ba669d2a985f9b426c1797b7d48d6963899276d22f23e33d47e37"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70eb60b3ae9245ddea20f8a4190bd79c705a22f8028aaf8bbdebe4716c3fab24"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4041711832360a9b75cfb11b25a6a97c8fb49c07b8bd43d0d02b45d0b499a4ff"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64607d4cbf1b7e3c3c8a14948b99345eda0e161b852e122c6bb71aab6d1d798c"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e69b0a0e2537f26d73b4e43ad7bc8c8efb39621639b4434b76a3de50c6966e"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc27863442d388870c1809a87507727b799c8460573cfbb6dc0eeaef5a11b5ec"}, + {file = "rpds_py-0.22.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e79dd39f1e8c3504be0607e5fc6e86bb60fe3584bec8b782578c3b0fde8d932c"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e0fa2d4ec53dc51cf7d3bb22e0aa0143966119f42a0c3e4998293a3dd2856b09"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fda7cb070f442bf80b642cd56483b5548e43d366fe3f39b98e67cce780cded00"}, + {file = "rpds_py-0.22.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cff63a0272fcd259dcc3be1657b07c929c466b067ceb1c20060e8d10af56f5bf"}, + {file = "rpds_py-0.22.3-cp310-cp310-win32.whl", hash = "sha256:9bd7228827ec7bb817089e2eb301d907c0d9827a9e558f22f762bb690b131652"}, + {file = 
"rpds_py-0.22.3-cp310-cp310-win_amd64.whl", hash = "sha256:9beeb01d8c190d7581a4d59522cd3d4b6887040dcfc744af99aa59fef3e041a8"}, + {file = "rpds_py-0.22.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d20cfb4e099748ea39e6f7b16c91ab057989712d31761d3300d43134e26e165f"}, + {file = "rpds_py-0.22.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:68049202f67380ff9aa52f12e92b1c30115f32e6895cd7198fa2a7961621fc5a"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb4f868f712b2dd4bcc538b0a0c1f63a2b1d584c925e69a224d759e7070a12d5"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc51abd01f08117283c5ebf64844a35144a0843ff7b2983e0648e4d3d9f10dbb"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f3cec041684de9a4684b1572fe28c7267410e02450f4561700ca5a3bc6695a2"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ef9d9da710be50ff6809fed8f1963fecdfecc8b86656cadfca3bc24289414b0"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59f4a79c19232a5774aee369a0c296712ad0e77f24e62cad53160312b1c1eaa1"}, + {file = "rpds_py-0.22.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a60bce91f81ddaac922a40bbb571a12c1070cb20ebd6d49c48e0b101d87300d"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e89391e6d60251560f0a8f4bd32137b077a80d9b7dbe6d5cab1cd80d2746f648"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e3fb866d9932a3d7d0c82da76d816996d1667c44891bd861a0f97ba27e84fc74"}, + {file = "rpds_py-0.22.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1352ae4f7c717ae8cba93421a63373e582d19d55d2ee2cbb184344c82d2ae55a"}, + {file = "rpds_py-0.22.3-cp311-cp311-win32.whl", hash = "sha256:b0b4136a252cadfa1adb705bb81524eee47d9f6aab4f2ee4fa1e9d3cd4581f64"}, + {file = "rpds_py-0.22.3-cp311-cp311-win_amd64.whl", hash = "sha256:8bd7c8cfc0b8247c8799080fbff54e0b9619e17cdfeb0478ba7295d43f635d7c"}, + {file = "rpds_py-0.22.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:27e98004595899949bd7a7b34e91fa7c44d7a97c40fcaf1d874168bb652ec67e"}, + {file = "rpds_py-0.22.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1978d0021e943aae58b9b0b196fb4895a25cc53d3956b8e35e0b7682eefb6d56"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:655ca44a831ecb238d124e0402d98f6212ac527a0ba6c55ca26f616604e60a45"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:feea821ee2a9273771bae61194004ee2fc33f8ec7db08117ef9147d4bbcbca8e"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22bebe05a9ffc70ebfa127efbc429bc26ec9e9b4ee4d15a740033efda515cf3d"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af6e48651c4e0d2d166dc1b033b7042ea3f871504b6805ba5f4fe31581d8d38"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ba3c290821343c192f7eae1d8fd5999ca2dc99994114643e2f2d3e6138b15"}, + {file = "rpds_py-0.22.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:02fbb9c288ae08bcb34fb41d516d5eeb0455ac35b5512d03181d755d80810059"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:f56a6b404f74ab372da986d240e2e002769a7d7102cc73eb238a4f72eec5284e"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0a0461200769ab3b9ab7e513f6013b7a97fdeee41c29b9db343f3c5a8e2b9e61"}, + {file = "rpds_py-0.22.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8633e471c6207a039eff6aa116e35f69f3156b3989ea3e2d755f7bc41754a4a7"}, + {file = "rpds_py-0.22.3-cp312-cp312-win32.whl", hash = "sha256:593eba61ba0c3baae5bc9be2f5232430453fb4432048de28399ca7376de9c627"}, + {file = "rpds_py-0.22.3-cp312-cp312-win_amd64.whl", hash = "sha256:d115bffdd417c6d806ea9069237a4ae02f513b778e3789a359bc5856e0404cc4"}, + {file = "rpds_py-0.22.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ea7433ce7e4bfc3a85654aeb6747babe3f66eaf9a1d0c1e7a4435bbdf27fea84"}, + {file = "rpds_py-0.22.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6dd9412824c4ce1aca56c47b0991e65bebb7ac3f4edccfd3f156150c96a7bf25"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20070c65396f7373f5df4005862fa162db5d25d56150bddd0b3e8214e8ef45b4"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b09865a9abc0ddff4e50b5ef65467cd94176bf1e0004184eb915cbc10fc05c5"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3453e8d41fe5f17d1f8e9c383a7473cd46a63661628ec58e07777c2fff7196dc"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f5d36399a1b96e1a5fdc91e0522544580dbebeb1f77f27b2b0ab25559e103b8b"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:009de23c9c9ee54bf11303a966edf4d9087cd43a6003672e6aa7def643d06518"}, + {file = "rpds_py-0.22.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1aef18820ef3e4587ebe8b3bc9ba6e55892a6d7b93bac6d29d9f631a3b4befbd"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f60bd8423be1d9d833f230fdbccf8f57af322d96bcad6599e5a771b151398eb2"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:62d9cfcf4948683a18a9aff0ab7e1474d407b7bab2ca03116109f8464698ab16"}, + {file = "rpds_py-0.22.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9253fc214112405f0afa7db88739294295f0e08466987f1d70e29930262b4c8f"}, + {file = "rpds_py-0.22.3-cp313-cp313-win32.whl", hash = "sha256:fb0ba113b4983beac1a2eb16faffd76cb41e176bf58c4afe3e14b9c681f702de"}, + {file = "rpds_py-0.22.3-cp313-cp313-win_amd64.whl", hash = "sha256:c58e2339def52ef6b71b8f36d13c3688ea23fa093353f3a4fee2556e62086ec9"}, + {file = "rpds_py-0.22.3-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f82a116a1d03628a8ace4859556fb39fd1424c933341a08ea3ed6de1edb0283b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3dfcbc95bd7992b16f3f7ba05af8a64ca694331bd24f9157b49dadeeb287493b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59259dc58e57b10e7e18ce02c311804c10c5a793e6568f8af4dead03264584d1"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5725dd9cc02068996d4438d397e255dcb1df776b7ceea3b9cb972bdb11260a83"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99b37292234e61325e7a5bb9689e55e48c3f5f603af88b1642666277a81f1fbd"}, + {file = 
"rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:27b1d3b3915a99208fee9ab092b8184c420f2905b7d7feb4aeb5e4a9c509b8a1"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f612463ac081803f243ff13cccc648578e2279295048f2a8d5eb430af2bae6e3"}, + {file = "rpds_py-0.22.3-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f73d3fef726b3243a811121de45193c0ca75f6407fe66f3f4e183c983573e130"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3f21f0495edea7fdbaaa87e633a8689cd285f8f4af5c869f27bc8074638ad69c"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:1e9663daaf7a63ceccbbb8e3808fe90415b0757e2abddbfc2e06c857bf8c5e2b"}, + {file = "rpds_py-0.22.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a76e42402542b1fae59798fab64432b2d015ab9d0c8c47ba7addddbaf7952333"}, + {file = "rpds_py-0.22.3-cp313-cp313t-win32.whl", hash = "sha256:69803198097467ee7282750acb507fba35ca22cc3b85f16cf45fb01cb9097730"}, + {file = "rpds_py-0.22.3-cp313-cp313t-win_amd64.whl", hash = "sha256:f5cf2a0c2bdadf3791b5c205d55a37a54025c6e18a71c71f82bb536cf9a454bf"}, + {file = "rpds_py-0.22.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:378753b4a4de2a7b34063d6f95ae81bfa7b15f2c1a04a9518e8644e81807ebea"}, + {file = "rpds_py-0.22.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3445e07bf2e8ecfeef6ef67ac83de670358abf2996916039b16a218e3d95e97e"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b2513ba235829860b13faa931f3b6846548021846ac808455301c23a101689d"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eaf16ae9ae519a0e237a0f528fd9f0197b9bb70f40263ee57ae53c2b8d48aeb3"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:583f6a1993ca3369e0f80ba99d796d8e6b1a3a2a442dd4e1a79e652116413091"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4617e1915a539a0d9a9567795023de41a87106522ff83fbfaf1f6baf8e85437e"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c150c7a61ed4a4f4955a96626574e9baf1adf772c2fb61ef6a5027e52803543"}, + {file = "rpds_py-0.22.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fa4331c200c2521512595253f5bb70858b90f750d39b8cbfd67465f8d1b596d"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:214b7a953d73b5e87f0ebece4a32a5bd83c60a3ecc9d4ec8f1dca968a2d91e99"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f47ad3d5f3258bd7058d2d506852217865afefe6153a36eb4b6928758041d831"}, + {file = "rpds_py-0.22.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f276b245347e6e36526cbd4a266a417796fc531ddf391e43574cf6466c492520"}, + {file = "rpds_py-0.22.3-cp39-cp39-win32.whl", hash = "sha256:bbb232860e3d03d544bc03ac57855cd82ddf19c7a07651a7c0fdb95e9efea8b9"}, + {file = "rpds_py-0.22.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfbc454a2880389dbb9b5b398e50d439e2e58669160f27b60e5eca11f68ae17c"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d48424e39c2611ee1b84ad0f44fb3b2b53d473e65de061e3f460fc0be5f1939d"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:24e8abb5878e250f2eb0d7859a8e561846f98910326d06c0d51381fed59357bd"}, + {file = 
"rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b232061ca880db21fa14defe219840ad9b74b6158adb52ddf0e87bead9e8493"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac0a03221cdb5058ce0167ecc92a8c89e8d0decdc9e99a2ec23380793c4dcb96"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb0c341fa71df5a4595f9501df4ac5abfb5a09580081dffbd1ddd4654e6e9123"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf9db5488121b596dbfc6718c76092fda77b703c1f7533a226a5a9f65248f8ad"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b8db6b5b2d4491ad5b6bdc2bc7c017eec108acbf4e6785f42a9eb0ba234f4c9"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b3d504047aba448d70cf6fa22e06cb09f7cbd761939fdd47604f5e007675c24e"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e61b02c3f7a1e0b75e20c3978f7135fd13cb6cf551bf4a6d29b999a88830a338"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:e35ba67d65d49080e8e5a1dd40101fccdd9798adb9b050ff670b7d74fa41c566"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:26fd7cac7dd51011a245f29a2cc6489c4608b5a8ce8d75661bb4a1066c52dfbe"}, + {file = "rpds_py-0.22.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:177c7c0fce2855833819c98e43c262007f42ce86651ffbb84f37883308cb0e7d"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bb47271f60660803ad11f4c61b42242b8c1312a31c98c578f79ef9387bbde21c"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:70fb28128acbfd264eda9bf47015537ba3fe86e40d046eb2963d75024be4d055"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44d61b4b7d0c2c9ac019c314e52d7cbda0ae31078aabd0f22e583af3e0d79723"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f0e260eaf54380380ac3808aa4ebe2d8ca28b9087cf411649f96bad6900c728"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b25bc607423935079e05619d7de556c91fb6adeae9d5f80868dde3468657994b"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fb6116dfb8d1925cbdb52595560584db42a7f664617a1f7d7f6e32f138cdf37d"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a63cbdd98acef6570c62b92a1e43266f9e8b21e699c363c0fef13bd530799c11"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b8f60e1b739a74bab7e01fcbe3dddd4657ec685caa04681df9d562ef15b625f"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:2e8b55d8517a2fda8d95cb45d62a5a8bbf9dd0ad39c5b25c8833efea07b880ca"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:2de29005e11637e7a2361fa151f780ff8eb2543a0da1413bb951e9f14b699ef3"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:666ecce376999bf619756a24ce15bb14c5bfaf04bf00abc7e663ce17c3f34fe7"}, + {file = "rpds_py-0.22.3-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:5246b14ca64a8675e0a7161f7af68fe3e910e6b90542b4bfb5439ba752191df6"}, + {file = "rpds_py-0.22.3.tar.gz", hash = "sha256:e32fee8ab45d3c2db6da19a5323bc3362237c8b653c70194414b892fd06a080d"}, +] + +[[package]] +name = "ruff" +version = "0.9.6" +description = "An extremely fast Python linter and code formatter, written in Rust." +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "ruff-0.9.6-py3-none-linux_armv6l.whl", hash = "sha256:2f218f356dd2d995839f1941322ff021c72a492c470f0b26a34f844c29cdf5ba"}, + {file = "ruff-0.9.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b908ff4df65dad7b251c9968a2e4560836d8f5487c2f0cc238321ed951ea0504"}, + {file = "ruff-0.9.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b109c0ad2ececf42e75fa99dc4043ff72a357436bb171900714a9ea581ddef83"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1de4367cca3dac99bcbd15c161404e849bb0bfd543664db39232648dc00112dc"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3ee4d7c2c92ddfdaedf0bf31b2b176fa7aa8950efc454628d477394d35638b"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc1edd1775270e6aa2386119aea692039781429f0be1e0949ea5884e011aa8e"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4a091729086dffa4bd070aa5dab7e39cc6b9d62eb2bef8f3d91172d30d599666"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1bbc6808bf7b15796cef0815e1dfb796fbd383e7dbd4334709642649625e7c5"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:589d1d9f25b5754ff230dce914a174a7c951a85a4e9270613a2b74231fdac2f5"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc61dd5131742e21103fbbdcad683a8813be0e3c204472d520d9a5021ca8b217"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5e2d9126161d0357e5c8f30b0bd6168d2c3872372f14481136d13de9937f79b6"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:68660eab1a8e65babb5229a1f97b46e3120923757a68b5413d8561f8a85d4897"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c4cae6c4cc7b9b4017c71114115db0445b00a16de3bcde0946273e8392856f08"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19f505b643228b417c1111a2a536424ddde0db4ef9023b9e04a46ed8a1cb4656"}, + {file = "ruff-0.9.6-py3-none-win32.whl", hash = "sha256:194d8402bceef1b31164909540a597e0d913c0e4952015a5b40e28c146121b5d"}, + {file = "ruff-0.9.6-py3-none-win_amd64.whl", hash = "sha256:03482d5c09d90d4ee3f40d97578423698ad895c87314c4de39ed2af945633caa"}, + {file = "ruff-0.9.6-py3-none-win_arm64.whl", hash = "sha256:0e2bb706a2be7ddfea4a4af918562fdc1bcb16df255e5fa595bbd800ce322a5a"}, + {file = "ruff-0.9.6.tar.gz", hash = "sha256:81761592f72b620ec8fa1068a6fd00e98a5ebee342a3642efd84454f3031dca9"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "socksio" +version = "1.0.0" +description = "Sans-I/O 
implementation of SOCKS4, SOCKS4A, and SOCKS5." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "socksio-1.0.0-py3-none-any.whl", hash = "sha256:95dc1f15f9b34e8d7b16f06d74b8ccf48f609af32ab33c608d08761c5dcbb1f3"}, + {file = "socksio-1.0.0.tar.gz", hash = "sha256:f88beb3da5b5c38b9890469de67d0cb0f9d494b78b106ca1845f96c10b91c4ac"}, +] + +[[package]] +name = "soupsieve" +version = "2.6" +description = "A modern CSS selector implementation for Beautiful Soup." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, + {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, +] + +[[package]] +name = "sse-starlette" +version = "2.2.1" +description = "SSE plugin for Starlette" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "sse_starlette-2.2.1-py3-none-any.whl", hash = "sha256:6410a3d3ba0c89e7675d4c273a301d64649c03a5ef1ca101f10b47f895fd0e99"}, + {file = "sse_starlette-2.2.1.tar.gz", hash = "sha256:54470d5f19274aeed6b2d473430b08b4b379ea851d953b11d7f1c4a2c118b419"}, +] + +[package.dependencies] +anyio = ">=4.7.0" +starlette = ">=0.41.3" + +[package.extras] +examples = ["fastapi"] +uvicorn = ["uvicorn (>=0.34.0)"] + +[[package]] +name = "starlette" +version = "0.45.3" +description = "The little ASGI library that shines." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "starlette-0.45.3-py3-none-any.whl", hash = "sha256:dfb6d332576f136ec740296c7e8bb8c8a7125044e7c6da30744718880cdd059d"}, + {file = "starlette-0.45.3.tar.gz", hash = "sha256:2cbcba2a75806f8a41c722141486f37c28e30a0921c5f6fe4346cb0dcee1302f"}, +] + +[package.dependencies] +anyio = ">=3.6.2,<5" + +[package.extras] +full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] + +[[package]] +name = "termcolor" +version = "2.5.0" +description = "ANSI color formatting for output in terminal" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "termcolor-2.5.0-py3-none-any.whl", hash = "sha256:37b17b5fc1e604945c2642c872a3764b5d547a48009871aea3edd3afa180afb8"}, + {file = "termcolor-2.5.0.tar.gz", hash = "sha256:998d8d27da6d48442e8e1f016119076b690d962507531df4890fcd2db2ef8a6f"}, +] + +[package.extras] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "tiktoken" +version = "0.9.0" +description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"}, + {file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"}, + {file = "tiktoken-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0968d5beeafbca2a72c595e8385a1a1f8af58feaebb02b227229b69ca5357fd"}, + {file = "tiktoken-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a5fb085a6a3b7350b8fc838baf493317ca0e17bd95e8642f95fc69ecfed1de"}, + {file = "tiktoken-0.9.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:15a2752dea63d93b0332fb0ddb05dd909371ededa145fe6a3242f46724fa7990"}, + {file = "tiktoken-0.9.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:26113fec3bd7a352e4b33dbaf1bd8948de2507e30bd95a44e2b1156647bc01b4"}, + {file = "tiktoken-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f32cc56168eac4851109e9b5d327637f15fd662aa30dd79f964b7c39fbadd26e"}, + {file = "tiktoken-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:45556bc41241e5294063508caf901bf92ba52d8ef9222023f83d2483a3055348"}, + {file = "tiktoken-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03935988a91d6d3216e2ec7c645afbb3d870b37bcb67ada1943ec48678e7ee33"}, + {file = "tiktoken-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b3d80aad8d2c6b9238fc1a5524542087c52b860b10cbf952429ffb714bc1136"}, + {file = "tiktoken-0.9.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b2a21133be05dc116b1d0372af051cd2c6aa1d2188250c9b553f9fa49301b336"}, + {file = "tiktoken-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:11a20e67fdf58b0e2dea7b8654a288e481bb4fc0289d3ad21291f8d0849915fb"}, + {file = "tiktoken-0.9.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e88f121c1c22b726649ce67c089b90ddda8b9662545a8aeb03cfef15967ddd03"}, + {file = "tiktoken-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a6600660f2f72369acb13a57fb3e212434ed38b045fd8cc6cdd74947b4b5d210"}, + {file = "tiktoken-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95e811743b5dfa74f4b227927ed86cbc57cad4df859cb3b643be797914e41794"}, + {file = "tiktoken-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99376e1370d59bcf6935c933cb9ba64adc29033b7e73f5f7569f3aad86552b22"}, + {file = "tiktoken-0.9.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:badb947c32739fb6ddde173e14885fb3de4d32ab9d8c591cbd013c22b4c31dd2"}, + {file = "tiktoken-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:5a62d7a25225bafed786a524c1b9f0910a1128f4232615bf3f8257a73aaa3b16"}, + {file = "tiktoken-0.9.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b0e8e05a26eda1249e824156d537015480af7ae222ccb798e5234ae0285dbdb"}, + {file = "tiktoken-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27d457f096f87685195eea0165a1807fae87b97b2161fe8c9b1df5bd74ca6f63"}, + {file = "tiktoken-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cf8ded49cddf825390e36dd1ad35cd49589e8161fdcb52aa25f0583e90a3e01"}, + {file = "tiktoken-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc156cb314119a8bb9748257a2eaebd5cc0753b6cb491d26694ed42fc7cb3139"}, + {file = "tiktoken-0.9.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cd69372e8c9dd761f0ab873112aba55a0e3e506332dd9f7522ca466e817b1b7a"}, + {file = "tiktoken-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:5ea0edb6f83dc56d794723286215918c1cde03712cbbafa0348b33448faf5b95"}, + {file = "tiktoken-0.9.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c6386ca815e7d96ef5b4ac61e0048cd32ca5a92d5781255e13b31381d28667dc"}, + {file = "tiktoken-0.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:75f6d5db5bc2c6274b674ceab1615c1778e6416b14705827d19b40e6355f03e0"}, + {file = "tiktoken-0.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e15b16f61e6f4625a57a36496d28dd182a8a60ec20a534c5343ba3cafa156ac7"}, + {file = "tiktoken-0.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebcec91babf21297022882344c3f7d9eed855931466c3311b1ad6b64befb3df"}, + {file = "tiktoken-0.9.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:e5fd49e7799579240f03913447c0cdfa1129625ebd5ac440787afc4345990427"}, + {file = "tiktoken-0.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:26242ca9dc8b58e875ff4ca078b9a94d2f0813e6a535dcd2205df5d49d927cc7"}, + {file = "tiktoken-0.9.0.tar.gz", hash = "sha256:d02a5ca6a938e0490e1ff957bc48c8b078c88cb83977be1625b1fd8aac792c5d"}, +] + +[package.dependencies] +regex = ">=2022.1.18" +requests = ">=2.26.0" + +[package.extras] +blobfile = ["blobfile (>=2)"] + +[[package]] +name = "tokenizers" +version = "0.21.0" +description = "" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "tokenizers-0.21.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3c4c93eae637e7d2aaae3d376f06085164e1660f89304c0ab2b1d08a406636b2"}, + {file = "tokenizers-0.21.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:f53ea537c925422a2e0e92a24cce96f6bc5046bbef24a1652a5edc8ba975f62e"}, + {file = "tokenizers-0.21.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b177fb54c4702ef611de0c069d9169f0004233890e0c4c5bd5508ae05abf193"}, + {file = "tokenizers-0.21.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b43779a269f4629bebb114e19c3fca0223296ae9fea8bb9a7a6c6fb0657ff8e"}, + {file = "tokenizers-0.21.0-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aeb255802be90acfd363626753fda0064a8df06031012fe7d52fd9a905eb00e"}, + {file = "tokenizers-0.21.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8b09dbeb7a8d73ee204a70f94fc06ea0f17dcf0844f16102b9f414f0b7463ba"}, + {file = "tokenizers-0.21.0-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:400832c0904f77ce87c40f1a8a27493071282f785724ae62144324f171377273"}, + {file = "tokenizers-0.21.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84ca973b3a96894d1707e189c14a774b701596d579ffc7e69debfc036a61a04"}, + {file = "tokenizers-0.21.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:eb7202d231b273c34ec67767378cd04c767e967fda12d4a9e36208a34e2f137e"}, + {file = "tokenizers-0.21.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:089d56db6782a73a27fd8abf3ba21779f5b85d4a9f35e3b493c7bbcbbf0d539b"}, + {file = "tokenizers-0.21.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:c87ca3dc48b9b1222d984b6b7490355a6fdb411a2d810f6f05977258400ddb74"}, + {file = "tokenizers-0.21.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4145505a973116f91bc3ac45988a92e618a6f83eb458f49ea0790df94ee243ff"}, + {file = "tokenizers-0.21.0-cp39-abi3-win32.whl", hash = "sha256:eb1702c2f27d25d9dd5b389cc1f2f51813e99f8ca30d9e25348db6585a97e24a"}, + {file = "tokenizers-0.21.0-cp39-abi3-win_amd64.whl", hash = "sha256:87841da5a25a3a5f70c102de371db120f41873b854ba65e52bccd57df5a3780c"}, + {file = "tokenizers-0.21.0.tar.gz", hash = "sha256:ee0894bf311b75b0c03079f33859ae4b2334d675d4e93f5a4132e1eae2834fe4"}, +] + +[package.dependencies] +huggingface-hub = ">=0.16.4,<1.0" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"] + +[[package]] +name = "tomlkit" +version = "0.13.2" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, + {file = "tomlkit-0.13.2.tar.gz", hash = 
"sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, +] + +[[package]] +name = "tox" +version = "4.24.1" +description = "tox is a generic virtualenv management and test command line tool" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "tox-4.24.1-py3-none-any.whl", hash = "sha256:57ba7df7d199002c6df8c2db9e6484f3de6ca8f42013c083ea2d4d1e5c6bdc75"}, + {file = "tox-4.24.1.tar.gz", hash = "sha256:083a720adbc6166fff0b7d1df9d154f9d00bfccb9403b8abf6bc0ee435d6a62e"}, +] + +[package.dependencies] +cachetools = ">=5.5" +chardet = ">=5.2" +colorama = ">=0.4.6" +filelock = ">=3.16.1" +packaging = ">=24.2" +platformdirs = ">=4.3.6" +pluggy = ">=1.5" +pyproject-api = ">=1.8" +virtualenv = ">=20.27.1" + +[package.extras] +test = ["devpi-process (>=1.0.2)", "pytest (>=8.3.3)", "pytest-mock (>=3.14)"] + +[[package]] +name = "tqdm" +version = "4.67.1" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, + {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "traitlets" +version = "5.14.3" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] + +[[package]] +name = "types-requests" +version = "2.32.0.20241016" +description = "Typing stubs for requests" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, + {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, +] + +[package.dependencies] +urllib3 = ">=2" + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +groups = ["main", "dev"] +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "urllib3" +version = "2.3.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, + {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "uvicorn" +version = "0.34.0" +description = "The lightning-fast ASGI server." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4"}, + {file = "uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9"}, +] + +[package.dependencies] +click = ">=7.0" +h11 = ">=0.8" + +[package.extras] +standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] + +[[package]] +name = "virtualenv" +version = "20.29.2" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "virtualenv-20.29.2-py3-none-any.whl", hash = "sha256:febddfc3d1ea571bdb1dc0f98d7b45d24def7428214d4fb73cc486c9568cce6a"}, + {file = "virtualenv-20.29.2.tar.gz", hash = "sha256:fdaabebf6d03b5ba83ae0a02cfe96f48a716f4fae556461d180825866f75b728"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] + +[[package]] +name = "wcwidth" +version = "0.2.13" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, +] + +[[package]] +name = "wikipedia" +version = "1.4.0" +description = "Wikipedia API for Python" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "wikipedia-1.4.0.tar.gz", hash = "sha256:db0fad1829fdd441b1852306e9856398204dc0786d2996dd2e0c8bb8e26133b2"}, +] + +[package.dependencies] +beautifulsoup4 = "*" +requests = ">=2.0.0,<3.0.0" + +[[package]] +name = "yarl" +version = "1.18.3" 
+description = "Yet another URL library" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7df647e8edd71f000a5208fe6ff8c382a1de8edfbccdbbfe649d263de07d8c34"}, + {file = "yarl-1.18.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c69697d3adff5aa4f874b19c0e4ed65180ceed6318ec856ebc423aa5850d84f7"}, + {file = "yarl-1.18.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:602d98f2c2d929f8e697ed274fbadc09902c4025c5a9963bf4e9edfc3ab6f7ed"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c654d5207c78e0bd6d749f6dae1dcbbfde3403ad3a4b11f3c5544d9906969dde"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5094d9206c64181d0f6e76ebd8fb2f8fe274950a63890ee9e0ebfd58bf9d787b"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35098b24e0327fc4ebdc8ffe336cee0a87a700c24ffed13161af80124b7dc8e5"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3236da9272872443f81fedc389bace88408f64f89f75d1bdb2256069a8730ccc"}, + {file = "yarl-1.18.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2c08cc9b16f4f4bc522771d96734c7901e7ebef70c6c5c35dd0f10845270bcd"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80316a8bd5109320d38eef8833ccf5f89608c9107d02d2a7f985f98ed6876990"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:c1e1cc06da1491e6734f0ea1e6294ce00792193c463350626571c287c9a704db"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fea09ca13323376a2fdfb353a5fa2e59f90cd18d7ca4eaa1fd31f0a8b4f91e62"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e3b9fd71836999aad54084906f8663dffcd2a7fb5cdafd6c37713b2e72be1760"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:757e81cae69244257d125ff31663249b3013b5dc0a8520d73694aed497fb195b"}, + {file = "yarl-1.18.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b1771de9944d875f1b98a745bc547e684b863abf8f8287da8466cf470ef52690"}, + {file = "yarl-1.18.3-cp310-cp310-win32.whl", hash = "sha256:8874027a53e3aea659a6d62751800cf6e63314c160fd607489ba5c2edd753cf6"}, + {file = "yarl-1.18.3-cp310-cp310-win_amd64.whl", hash = "sha256:93b2e109287f93db79210f86deb6b9bbb81ac32fc97236b16f7433db7fc437d8"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8503ad47387b8ebd39cbbbdf0bf113e17330ffd339ba1144074da24c545f0069"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02ddb6756f8f4517a2d5e99d8b2f272488e18dd0bfbc802f31c16c6c20f22193"}, + {file = "yarl-1.18.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:67a283dd2882ac98cc6318384f565bffc751ab564605959df4752d42483ad889"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d980e0325b6eddc81331d3f4551e2a333999fb176fd153e075c6d1c2530aa8a8"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b643562c12680b01e17239be267bc306bbc6aac1f34f6444d1bded0c5ce438ca"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c017a3b6df3a1bd45b9fa49a0f54005e53fbcad16633870104b66fa1a30a29d8"}, + {file = 
"yarl-1.18.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75674776d96d7b851b6498f17824ba17849d790a44d282929c42dbb77d4f17ae"}, + {file = "yarl-1.18.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ccaa3a4b521b780a7e771cc336a2dba389a0861592bbce09a476190bb0c8b4b3"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2d06d3005e668744e11ed80812e61efd77d70bb7f03e33c1598c301eea20efbb"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:9d41beda9dc97ca9ab0b9888cb71f7539124bc05df02c0cff6e5acc5a19dcc6e"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ba23302c0c61a9999784e73809427c9dbedd79f66a13d84ad1b1943802eaaf59"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:6748dbf9bfa5ba1afcc7556b71cda0d7ce5f24768043a02a58846e4a443d808d"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0b0cad37311123211dc91eadcb322ef4d4a66008d3e1bdc404808992260e1a0e"}, + {file = "yarl-1.18.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0fb2171a4486bb075316ee754c6d8382ea6eb8b399d4ec62fde2b591f879778a"}, + {file = "yarl-1.18.3-cp311-cp311-win32.whl", hash = "sha256:61b1a825a13bef4a5f10b1885245377d3cd0bf87cba068e1d9a88c2ae36880e1"}, + {file = "yarl-1.18.3-cp311-cp311-win_amd64.whl", hash = "sha256:b9d60031cf568c627d028239693fd718025719c02c9f55df0a53e587aab951b5"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1dd4bdd05407ced96fed3d7f25dbbf88d2ffb045a0db60dbc247f5b3c5c25d50"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7c33dd1931a95e5d9a772d0ac5e44cac8957eaf58e3c8da8c1414de7dd27c576"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b411eddcfd56a2f0cd6a384e9f4f7aa3efee14b188de13048c25b5e91f1640"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:436c4fc0a4d66b2badc6c5fc5ef4e47bb10e4fd9bf0c79524ac719a01f3607c2"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e35ef8683211db69ffe129a25d5634319a677570ab6b2eba4afa860f54eeaf75"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84b2deecba4a3f1a398df819151eb72d29bfeb3b69abb145a00ddc8d30094512"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e5a1fea0fd4f5bfa7440a47eff01d9822a65b4488f7cff83155a0f31a2ecba"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0e883008013c0e4aef84dcfe2a0b172c4d23c2669412cf5b3371003941f72bb"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a3f356548e34a70b0172d8890006c37be92995f62d95a07b4a42e90fba54272"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ccd17349166b1bee6e529b4add61727d3f55edb7babbe4069b5764c9587a8cc6"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b958ddd075ddba5b09bb0be8a6d9906d2ce933aee81100db289badbeb966f54e"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c7d79f7d9aabd6011004e33b22bc13056a3e3fb54794d138af57f5ee9d9032cb"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4891ed92157e5430874dad17b15eb1fda57627710756c27422200c52d8a4e393"}, + {file = 
"yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285"}, + {file = "yarl-1.18.3-cp312-cp312-win32.whl", hash = "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2"}, + {file = "yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:90adb47ad432332d4f0bc28f83a5963f426ce9a1a8809f5e584e704b82685dcb"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:913829534200eb0f789d45349e55203a091f45c37a2674678744ae52fae23efa"}, + {file = "yarl-1.18.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ef9f7768395923c3039055c14334ba4d926f3baf7b776c923c93d80195624782"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a19f62ff30117e706ebc9090b8ecc79aeb77d0b1f5ec10d2d27a12bc9f66d0"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e17c9361d46a4d5addf777c6dd5eab0715a7684c2f11b88c67ac37edfba6c482"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a74a13a4c857a84a845505fd2d68e54826a2cd01935a96efb1e9d86c728e186"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41f7ce59d6ee7741af71d82020346af364949314ed3d87553763a2df1829cc58"}, + {file = "yarl-1.18.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f52a265001d830bc425f82ca9eabda94a64a4d753b07d623a9f2863fde532b53"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:82123d0c954dc58db301f5021a01854a85bf1f3bb7d12ae0c01afc414a882ca2"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:2ec9bbba33b2d00999af4631a3397d1fd78290c48e2a3e52d8dd72db3a067ac8"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fbd6748e8ab9b41171bb95c6142faf068f5ef1511935a0aa07025438dd9a9bc1"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:877d209b6aebeb5b16c42cbb377f5f94d9e556626b1bfff66d7b0d115be88d0a"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:b464c4ab4bfcb41e3bfd3f1c26600d038376c2de3297760dfe064d2cb7ea8e10"}, + {file = "yarl-1.18.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d39d351e7faf01483cc7ff7c0213c412e38e5a340238826be7e0e4da450fdc8"}, + {file = "yarl-1.18.3-cp313-cp313-win32.whl", hash = "sha256:61ee62ead9b68b9123ec24bc866cbef297dd266175d53296e2db5e7f797f902d"}, + {file = "yarl-1.18.3-cp313-cp313-win_amd64.whl", hash = "sha256:578e281c393af575879990861823ef19d66e2b1d0098414855dd367e234f5b3c"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:61e5e68cb65ac8f547f6b5ef933f510134a6bf31bb178be428994b0cb46c2a04"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe57328fbc1bfd0bd0514470ac692630f3901c0ee39052ae47acd1d90a436719"}, + {file = "yarl-1.18.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a440a2a624683108a1b454705ecd7afc1c3438a08e890a1513d468671d90a04e"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09c7907c8548bcd6ab860e5f513e727c53b4a714f459b084f6580b49fa1b9cee"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b4f6450109834af88cb4cc5ecddfc5380ebb9c228695afc11915a0bf82116789"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9ca04806f3be0ac6d558fffc2fdf8fcef767e0489d2684a21912cc4ed0cd1b8"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77a6e85b90a7641d2e07184df5557132a337f136250caafc9ccaa4a2a998ca2c"}, + {file = "yarl-1.18.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6333c5a377c8e2f5fae35e7b8f145c617b02c939d04110c76f29ee3676b5f9a5"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0b3c92fa08759dbf12b3a59579a4096ba9af8dd344d9a813fc7f5070d86bbab1"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4ac515b860c36becb81bb84b667466885096b5fc85596948548b667da3bf9f24"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:045b8482ce9483ada4f3f23b3774f4e1bf4f23a2d5c912ed5170f68efb053318"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:a4bb030cf46a434ec0225bddbebd4b89e6471814ca851abb8696170adb163985"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:54d6921f07555713b9300bee9c50fb46e57e2e639027089b1d795ecd9f7fa910"}, + {file = "yarl-1.18.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1d407181cfa6e70077df3377938c08012d18893f9f20e92f7d2f314a437c30b1"}, + {file = "yarl-1.18.3-cp39-cp39-win32.whl", hash = "sha256:ac36703a585e0929b032fbaab0707b75dc12703766d0b53486eabd5139ebadd5"}, + {file = "yarl-1.18.3-cp39-cp39-win_amd64.whl", hash = "sha256:ba87babd629f8af77f557b61e49e7c7cac36f22f871156b91e10a6e9d4f829e9"}, + {file = "yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b"}, + {file = "yarl-1.18.3.tar.gz", hash = "sha256:ac1801c45cbf77b6c99242eeff4fffb5e4e73a800b5c4ad4fc0be5def634d2e1"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" +propcache = ">=0.2.0" + +[[package]] +name = "zipp" +version = "3.21.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[metadata] +lock-version = "2.1" +python-versions = ">= 3.11,<4.0" +content-hash = "166f35b8d63247334b24d0058116d662570f42bd709e5794ca150ce100b885ff" diff --git a/python/pyproject.toml b/python/pyproject.toml new file mode 100644 index 00000000..6e75244d --- /dev/null +++ b/python/pyproject.toml @@ -0,0 +1,267 @@ +[project] +name = "beeai-framework" +version="0.0.1" +license = "Apache-2.0" +readme = "README.md" +authors = [{ name = "IBM Corp." 
}] +maintainers = [] +requires-python = ">=3.11,<4.0" + +[project.urls] +homepage = "https://iambee.ai" +repository = "https://github.com/i-am-bee/beeai-framework" +documentation = "https://i-am-bee.github.io/beeai-framework/#/python/" + +[tool.poetry.dependencies] +python = ">= 3.11,<4.0" +pydantic = "^2.10" +requests = "^2.32" +pylint = "^3.3.2" +pydantic-settings = "^2.7" +chevron = "^0.14.0" +types-requests = "^2.32.0.20241016" +litellm = "^1.60.2" +aiofiles = "^24.1.0" +pyventus = "^0.6.0" +wikipedia = "^1.4.0" +mcp = "^1.2.0" +duckduckgo-search = "^7.3.2" + +[tool.poetry.group.dev.dependencies] +pytest = "^8.3.4" +commitizen = "^4.2.1" +poethepoet = "^0.32.2" +ruff = "^0.9.6" +tox = "^4.20" +mypy = "^1.15.0" +pytest-asyncio = "^0.25.3" +nbstripout = "^0.8.1" +pytest-cov = "^6.0.0" + +[tool.mypy] +mypy_path = "$MYPY_CONFIG_FILE_DIR/beeai_framework" +exclude = "^build/$" +check_untyped_defs = true +plugins = ["pydantic.mypy"] + +[tool.pydantic-mypy] +init_forbid_extra = true +init_typed = true +warn_required_dynamic_aliases = true + +[tool.commitizen] +name = "cz_monorepo_commits" +tag_format = "python_$version" +version_scheme = "semver" +version_provider = "pep621" +update_changelog_on_bump = true +major_version_zero = true +bump_message = "chore: python release $new_version" + +[tool.pytest.ini_options] +addopts = "-v --cov --cov-report html" +testpaths = ["tests", "beeai_framework"] +python_files = ["test_*.py", "*_test.py"] +markers = [ + "unit", + "integration", + "e2e", + "extension" +] +log_cli = true +log_cli_level = "DEBUG" +log_cli_format = "%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)" +log_cli_date_format = "%Y-%m-%d %H:%M:%S" +asyncio_mode = "strict" +asyncio_default_fixture_loop_scope = "function" +filterwarnings = "ignore::DeprecationWarning:pydantic" # some dependencies still use the deprecated class-based config + +[tool.pytest-asyncio] +asyncio_mode = "strict" +default_fixture_loop_scope = "function" + +[tool.coverage.run] +omit = ["tests/*"] + +[tool.coverage.report] +exclude_lines = [ + "pragma: no cover", # re-enable the standard pragma + "pass", # Skip any pass lines such as may be used for @abstractmethod +] + +[tool.poe.tasks.clean] +help = "Remove all artifacts and builds" +sequence = [ + { script = "shutil:rmtree('build/', ignore_errors=1)"}, + { script = "shutil:rmtree('dist/', ignore_errors=1)"} +] + +[tool.poe.tasks.build] +help = "Build a package" +cmd = "poetry build" + +[tool.poe.tasks.commit] +help = "Create a commit" +cmd = "cz commit" + +[tool.poe.tasks.format] +help = "Run all formatting tools" +control.expr = "fix" +args = [ + { name = "TARGET", positional = true, multiple = true }, + { name = "fix", options = ["--fix"], type = "boolean", help = "Apply fixes instead of checking" } +] +uses = { PY_FILES = "_extract_python_files $TARGET" } + + [[tool.poe.tasks.format.switch]] + case = "True" + cmd = "ruff format ${PY_FILES}" + + [[tool.poe.tasks.format.switch]] + cmd = "ruff format --check ${PY_FILES}" + +[tool.poe.tasks.lint] +help = "Check for errors and fix them using ruff" +args = [ + { name = "TARGET", positional = true, multiple = true }, + { name = "FIX", options = ["--fix"], type = "boolean", help = "Apply fixes instead of checking" } +] +uses = { PY_FILES = "_extract_python_files $TARGET" } +cmd = "ruff check ${FIX:+--fix} ${PY_FILES}" + +[tool.poe.tasks.git] +control.expr = "hook" +cwd = ".."
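+# hook tasks run from the repository root (one level above python/), where git resolves paths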
+args = ["hook", { name = "TARGET", positional = true, multiple = true }] +default = "pass" + + [[tool.poe.tasks.git.switch]] + case = "commit-msg" + shell = "cz check --allow-abort --commit-msg-file $TARGET" + + [[tool.poe.tasks.git.switch]] + case = "pre-commit" + sequence = [ + { "ref" = "copyright ${TARGET}"}, + { "ref" = "format --fix ${TARGET}"}, + { "ref" = "lint --fix ${TARGET}" }, + { "shell" = "git add -u ${TARGET}" } + ] + +[tool.poe.tasks.docs] +help = "Documentation related commands" +control.expr = "type" +args = ["type"] + + [[tool.poe.tasks.docs.switch]] + case = "build" + help = "Update sources for the documentation" + sequence = [ + { shell = "cp *.md docs/ && cp examples/README.md docs/examples.md" }, + { shell = "npx --yes embedme --source-root=. docs/**/*.md" }, + { "ref" = "format --fix docs"}, + { "ref" = "lint --fix docs" }, + { "shell" = "git add -u docs" } + ] + + [[tool.poe.tasks.docs.switch]] + case = "check" + help = "Update sources for the documentation" + sequence = [ + { shell = "npx --yes embedme --source-root=. docs/**/*.md --verify" }, + ] + + [[tool.poe.tasks.docs.switch]] + case = "watch" + help = "Open docs web dev mode" + shell = "npx --yes docsify-cli serve ./docs --open" + +[tool.poe.tasks.copyright] +help = "Copyright headers" +args = [{ name = "TARGET", positional = true, multiple = true }, { name = "type", options = ["--type"], default = "add" }] +cwd = "." +shell = "TYPE=${type} ./scripts/copyright.sh" + +[tool.poe.tasks.release] +help = "Release a new version" +sequence = [ + { shell = '[ -z "$(git status -uno --porcelain)" ] || { echo "Your index contains uncommitted changes! Commit them and try again." && exit 1; }' }, + { "ref" = "clean"}, + { "cmd" = "cz bump"}, + { "cmd" = "poetry publish --build --dry-run" }, + { "cmd" = "git push && git push origin $(git describe --tags --exact-match)"} +] + +[tool.poe.tasks.test] +args = ["type"] +control.expr = "type" + + [[tool.poe.tasks.test.switch]] + case = "unit" + help = "Run Unit Tests" + cmd = "pytest -m 'unit'" + + [[tool.poe.tasks.test.switch]] + case = "integration" + help = "Run Integration Tests" + cmd = "pytest -m 'integration' tests/integration" + + [[tool.poe.tasks.test.switch]] + case = "extension" + help = "Run Extension Tests" + cmd = "pytest -m 'integration'" + + [[tool.poe.tasks.test.switch]] + case = "e2e" + help = "Run E2E Tests" + cmd = "pytest -m 'e2e'" + + [[tool.poe.tasks.test.switch]] + help = "Run All Tests" + cmd = "pytest" + +[tool.poe.tasks._extract_python_files] +help = "Extract Python files only" +args = [{ name = "TARGET", positional = true, multiple = true }] +shell = "printf '%s\\n' $TARGET | grep -E '\\.(py|ipynb)$' | tr '\\n' ' '" + +[build-system] +requires = ["poetry-core>=2.0.0,<3.0.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry.plugins."commitizen.plugin"] +cz_monorepo_commits = "cz_commitizen:MonorepoCommitsCz" + +[tool.ruff] +lint.select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "UP", # pyupgrade + "I", # isort + "B", # bugbear + "ANN", # annotations + "N", # pep8-naming + "C4", # Comprehensions + "DTZ", # DatetimeZ + "Q", # Quotes + "SIM", # Simplify + "RUF", # Ruff + "TID", # tidy-imports + "ASYNC", # async +] +force-exclude = true +lint.ignore = ["ANN401"] +lint.fixable = ["ALL"] +lint.unfixable = [] +line-length = 120 + +[tool.ruff.lint.per-file-ignores] +"*.ipynb" = ["E501"] + +[tool.ruff.lint.pydocstyle] +convention = "google" + +[tool.ruff.lint.flake8-tidy-imports] +# Disallow all relative 
imports, "parents" | "all" +ban-relative-imports = "all" diff --git a/python/scripts/copyright.sh b/python/scripts/copyright.sh new file mode 100755 index 00000000..c1d46053 --- /dev/null +++ b/python/scripts/copyright.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# Copyright 2025 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +if [ "$#" -eq 0 ]; then + TARGETS=('beeai_framework/**/*.py' "cz_commitizen/**/*.py" "tests/**/*.py" "scripts/**/*.{sh,ts,js}") +else + TARGETS=("${@/#$PWD\//}") +fi + +AUTHOR="IBM Corp." + +# Check if 'nwa' command is not available and 'brew' is available +if ! command -v nwa &> /dev/null && command -v brew &> /dev/null; then + echo "Installing 'nwa' via 'brew' (https://github.com/B1NARY-GR0UP/nwa)" + brew tap B1NARY-GR0UP/nwa + brew install nwa +fi + +# Check if 'nwa' command is not available and 'go' is available, then install 'nwa' +if ! command -v nwa &> /dev/null && command -v go &> /dev/null; then + echo "Installing 'nwa' via 'go' (https://github.com/B1NARY-GR0UP/nwa)" + go install github.com/B1NARY-GR0UP/nwa@latest + # Ensure the GOPATH is added to the PATH environment variable + export PATH=$PATH:$(go env GOPATH)/bin +fi + +TYPE=${TYPE:-add} + +if command -v nwa &> /dev/null; then + echo "Running 'nwa' version $(nwa --version)" + nwa "${TYPE}" -l apache -c "$AUTHOR" "${TARGETS[@]}" +elif command -v docker &> /dev/null; then + docker run --rm -v "${PWD}:/src" ghcr.io/b1nary-gr0up/nwa:main "${TYPE}" -l apache -c "$AUTHOR" "${TARGETS[@]}" +else + if [ "$COPYRIGHT_STRICT" = true ] ; then + echo "Error: 'nwa' is not available. Either install it manually or install go/docker." + exit 1 + else + echo "Copyright script was not executed because the nwa package could not be installed." 
+ fi +fi diff --git a/python/tests/__init__.py b/python/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python/tests/backend/test_chatmodel.py b/python/tests/backend/test_chatmodel.py new file mode 100644 index 00000000..4209120c --- /dev/null +++ b/python/tests/backend/test_chatmodel.py @@ -0,0 +1,127 @@ +# SPDX-License-Identifier: Apache-2.0 + +import asyncio +from collections.abc import AsyncGenerator + +import pytest +import pytest_asyncio +from pydantic import BaseModel + +from beeai_framework.adapters.ollama.backend.chat import OllamaChatModel +from beeai_framework.adapters.watsonx.backend.chat import WatsonxChatModel +from beeai_framework.backend.chat import ( + ChatModel, + ChatModelInput, + ChatModelOutput, + ChatModelStructureInput, + ChatModelStructureOutput, +) +from beeai_framework.backend.message import AssistantMessage, CustomMessage, Message, UserMessage +from beeai_framework.cancellation import AbortSignal +from beeai_framework.context import RunContext + + +class ReverseWordsDummyModel(ChatModel): + """Dummy model that simply reverses every word in a UserMessages""" + + model_id = "reversed_words_model" + provider_id = "reversed_words_model" + + def reverse_message_words(self, messages: list[str]) -> str: + reversed_words_messages = [] + for message in messages: + if isinstance(message, UserMessage): + reversed_words = " ".join(word[::-1] for word in message.text.split()) + reversed_words_messages.append(reversed_words) + return reversed_words_messages + + async def _create(self, input: ChatModelInput, _: RunContext) -> ChatModelOutput: + reversed_words_messages = self.reverse_message_words(input.messages) + return ChatModelOutput(messages=[AssistantMessage(w) for w in reversed_words_messages]) + + async def _create_stream(self, input: ChatModelInput, context: RunContext) -> AsyncGenerator[ChatModelOutput]: + words = self.reverse_message_words(input.messages)[0].split(" ") + + last = len(words) - 1 + for count, chunk in enumerate(words): + if context.signal.aborted: + break + await asyncio.sleep(3) + yield ChatModelOutput(messages=[AssistantMessage(f"{chunk} " if count != last else chunk)]) + + async def _create_structure(self, input: ChatModelStructureInput, run: RunContext) -> ChatModelStructureOutput: + reversed_words_messages = self.reverse_message_words(input.messages) + response_object = {"reversed": "".join(reversed_words_messages)} + return ChatModelStructureOutput(object=response_object) + + +@pytest_asyncio.fixture +def reverse_words_chat() -> ChatModel: + return ReverseWordsDummyModel() + + +@pytest.fixture +def chat_messages_list() -> list[Message]: + user_message = UserMessage("tell me something interesting") + custom_message = CustomMessage(role="custom", content="this is a custom message") + return [user_message, custom_message] + + +@pytest.mark.asyncio +async def test_chat_model_create(reverse_words_chat: ChatModel, chat_messages_list: list[Message]) -> None: + response = await reverse_words_chat.create({"messages": chat_messages_list}) + + assert len(response.messages) == 1 + assert all(isinstance(message, AssistantMessage) for message in response.messages) + assert response.messages[0].get_texts()[0].get("text") == "llet em gnihtemos gnitseretni" + + +@pytest.mark.asyncio +async def test_chat_model_structure(reverse_words_chat: ChatModel, chat_messages_list: list[Message]) -> None: + class ReverseWordsSchema(BaseModel): + reversed: str + + reverse_words_chat = ReverseWordsDummyModel() + response = await 
reverse_words_chat.create_structure( + { + "schema": ReverseWordsSchema, + "messages": chat_messages_list, + } + ) + + ReverseWordsSchema.model_validate(response.object) + + +@pytest.mark.asyncio +async def test_chat_model_stream(reverse_words_chat: ChatModel, chat_messages_list: list[Message]) -> None: + response = await reverse_words_chat.create({"messages": chat_messages_list, "stream": True}) + + assert len(response.messages) == 4 + assert all(isinstance(message, AssistantMessage) for message in response.messages) + assert "".join([m.get_texts()[0].get("text") for m in response.messages]) == "llet em gnihtemos gnitseretni" + + +@pytest.mark.asyncio +async def test_chat_model_abort(reverse_words_chat: ChatModel, chat_messages_list: list[Message]) -> None: + response = await reverse_words_chat.create( + {"messages": chat_messages_list, "stream": True, "abort_signal": AbortSignal.timeout(5)} + ) + + # depending on when the abort occurs, the response may be None or a subset of the expected response + if response is not None: + assert len(response.messages) < 4 + assert all(isinstance(message, AssistantMessage) for message in response.messages) + text = response.messages[0].get_texts()[0].get("text") + print("Response returned:", text) + assert "llet em gnihtemos gnitseretni".startswith(text) + else: + print("No response returned.") + + +@pytest.mark.asyncio +async def test_chat_model_from() -> None: + ollama_chat_model = await ChatModel.from_name("ollama:llama3.1") + assert isinstance(ollama_chat_model, OllamaChatModel) + + watsonx_chat_model = await ChatModel.from_name("watsonx:ibm/granite-3-8b-instruct") + assert isinstance(watsonx_chat_model, WatsonxChatModel) diff --git a/python/tests/backend/test_message.py b/python/tests/backend/test_message.py new file mode 100644 index 00000000..ad7b5754 --- /dev/null +++ b/python/tests/backend/test_message.py @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: Apache-2.0 + +import json +from datetime import UTC, datetime + +from beeai_framework.backend import ( + AssistantMessage, + CustomMessage, + Message, + Role, + SystemMessage, + ToolMessage, + UserMessage, +) + + +def test_user_message() -> None: + text = "this is a user message" + message = Message.of( + { + "role": Role.USER, + "text": text, + "meta": {"createdAt": datetime.now(tz=UTC)}, + } + ) + content = message.content + assert isinstance(message, UserMessage) + assert len(content) == 1 + assert content[0].get("text") == text + + +def test_system_message() -> None: + text = "this is a system message" + message = Message.of( + { + "role": Role.SYSTEM, + "text": text, + "meta": {"createdAt": datetime.now(tz=UTC)}, + } + ) + content = message.content + assert isinstance(message, SystemMessage) + assert len(content) == 1 + assert content[0].get("text") == text + + +def test_assistant_message() -> None: + text = "this is an assistant message" + message = Message.of( + { + "role": Role.ASSISTANT, + "text": text, + "meta": {"createdAt": datetime.now(tz=UTC)}, + } + ) + content = message.content + assert isinstance(message, AssistantMessage) + assert len(content) == 1 + assert content[0].get("text") == text + + +def test_tool_message() -> None: + tool_result = { + "type": "tool-result", + "result": "this is a tool message", + "toolName": "tool_name", + "toolCallId": "tool_call_id", + } + message = Message.of( + { + "role": Role.TOOL, + "text": json.dumps(tool_result), + "meta": {"createdAt": datetime.now(tz=UTC)}, + } + ) + content = message.content + assert len(content) == 1 + assert content[0] ==
tool_result + assert isinstance(message, ToolMessage) + + +def test_custom_message() -> None: + text = "this is a custom message" + message = Message.of( + { + "role": "custom", + "text": text, + "meta": {"createdAt": datetime.now(tz=UTC)}, + } + ) + content = message.content + assert isinstance(message, CustomMessage) + assert len(content) == 1 + assert content[0].get("text") == text + assert message.role == "custom" diff --git a/python/tests/conftest.py b/python/tests/conftest.py new file mode 100644 index 00000000..282d35c6 --- /dev/null +++ b/python/tests/conftest.py @@ -0,0 +1,8 @@ +"""Pytest configuration for asyncio testing.""" + +from pytest import Parser + + +def pytest_addoption(parser: Parser) -> None: + """Register additional pytest ini options.""" + parser.addini("asyncio_mode", "default mode for async fixtures", default="strict") diff --git a/python/tests/errors_test.py b/python/tests/errors_test.py new file mode 100644 index 00000000..f3564506 --- /dev/null +++ b/python/tests/errors_test.py @@ -0,0 +1,163 @@ +# SPDX-License-Identifier: Apache-2.0 + +import unittest + +from beeai_framework.errors import ( + ArgumentError, + FrameworkError, + UnimplementedError, +) + + +class TestFrameworkError(unittest.TestCase): + """ + Test cases for FrameworkError + + Note that FrameworkError does not support passing a cause to its constructor. + In these tests the cause is set up directly by assigning error.__cause__ + In consuming code we expect to use 'raise ValueError("Calculation failed") from e' + """ + + # TODO: Add test methods that create actual exceptions + # TODO: Update direct setting of __cause__ after instantiation with use of constructor + + def test_basic_exception(self) -> None: + err = FrameworkError("Basic") + self.assertEqual(err.message, "Basic") + self.assertTrue(err.is_fatal()) + self.assertFalse(err.is_retryable()) + # Will be this exception, or the exception at the end of the chain + self.assertEqual(err.get_cause(), err) + self.assertEqual(err.name(), "FrameworkError") + + def test_custom_properties(self) -> None: + err = FrameworkError("Custom", is_fatal=False, is_retryable=True) + self.assertFalse(err.is_fatal()) + self.assertTrue(err.is_retryable()) + + # get_cause() returns the last exception in the chain - *itself* otherwise + def test_cause_single(self) -> None: + err = FrameworkError("Error") + self.assertEqual(err.get_cause(), err) + + def test_cause(self) -> None: + # Often a standard exception will be the original cause + inner_err = ValueError("Inner") + err = FrameworkError("Outer") + err.__cause__ = inner_err + self.assertEqual(err.get_cause(), inner_err) + + def test_has_fatal_error(self) -> None: + err = FrameworkError("Fatal", is_fatal=True) + self.assertTrue(err.has_fatal_error()) + + err2 = FrameworkError("Non-fatal", is_fatal=False) + self.assertFalse(err2.has_fatal_error()) + + inner_err = ValueError("Inner error") + err3 = FrameworkError("Outer non-fatal", is_fatal=False) + err3.__cause__ = inner_err + self.assertFalse(err3.has_fatal_error()) + + inner_err2 = FrameworkError("Inner fatal", is_fatal=True) + err4 = FrameworkError("Outer non-fatal", is_fatal=False) + err4.__cause__ = inner_err2 + self.assertTrue(err4.has_fatal_error()) + + def test_traverse_errors(self) -> None: + # Simple - one level of nesting - so 2 in total + inner_err = ValueError("error 2") + err = FrameworkError("error 1") + err.__cause__ = inner_err + errors = err.traverse_errors() + self.assertEqual(len(errors), 2) + self.assertIn(err, errors) + self.assertIn(inner_err, errors) + + # Test deeper nesting
- 4 + err4 = ValueError("error 4") + err3 = TypeError("error 3") + err3.__cause__ = err4 + err2 = FrameworkError("error 2") + err2.__cause__ = err3 + err1 = FrameworkError("error 1") + err1.__cause__ = err2 + errors = err1.traverse_errors() + # count includes the outermost error (1) + self.assertEqual(len(errors), 4) + self.assertIn(err1, errors) + self.assertIn(err2, errors) + self.assertIn(err3, errors) + self.assertIn(err4, errors) + + # @unittest.skip("TODO: Skip as message ie str(err) needs to be used in dump/explain") + def test_explain(self) -> None: + inner_err = ValueError("Inner") + err = FrameworkError("Outer") + err.__cause__ = inner_err + explanation = err.explain() + self.assertIn("Outer", explanation) + self.assertIn("Caused by: Inner", explanation) + + # Test with an exception that doesn't have a 'message' attribute (not all do) + class NoMessageError(Exception): + pass + + inner_err2 = NoMessageError() + err2 = FrameworkError("Outer error") + err2.__cause__ = inner_err2 + explanation2 = err2.explain() + self.assertIn("Outer error", explanation2) + self.assertIn("Caused by: NoMessageError", explanation2) + + def test_dump(self) -> None: + inner_err = ValueError("Inner ") + err = FrameworkError("Outer", is_fatal=True, is_retryable=False) + err.__cause__ = inner_err + dump = err.dump() + self.assertIn("Class: FrameworkError, Fatal: Fatal, Retryable: , Message: Outer", dump) + self.assertIn("Caused By: Class: ValueError, Message: Inner", dump) + + # Test with an exception that doesn't have 'is_fatal' and 'is_retryable' attributes + inner_err2 = TypeError("Type error") + err2 = FrameworkError("Outer") + err2.__cause__ = inner_err2 + dump2 = err2.dump() + self.assertIn("Class: FrameworkError, Fatal: Fatal, Retryable: , Message: Outer", dump2) # Outer + self.assertIn("Caused By: Class: TypeError, Message: Type error", dump2) # Inner + + # @unittest.skip("TODO: Skip as wrapped exception is not implemented correctly") + def test_ensure(self) -> None: + inner_err = ValueError("Value error") + wrapped_err = FrameworkError.ensure(inner_err) + self.assertIsInstance(wrapped_err, FrameworkError) + self.assertEqual(wrapped_err.get_cause(), inner_err) + self.assertEqual(str(inner_err), wrapped_err.message) + + # Ensure doesn't re-wrap a FrameworkError + fw_err = FrameworkError("Already a FrameworkError") + wrapped_fw_err = FrameworkError.ensure(fw_err) + self.assertIs(wrapped_fw_err, fw_err) # Check it returns the original. + + # Basic tests for custom errors. 
Not much new behaviour, only default properties + def test_not_implemented_error(self) -> None: + err = UnimplementedError() + self.assertEqual(err.message, "Not implemented!") + self.assertTrue(err.is_fatal()) + self.assertFalse(err.is_retryable()) + + err2 = UnimplementedError("Custom not implemented message") + self.assertEqual(err2.message, "Custom not implemented message") + + def test_value_framework_error(self) -> None: + err = ArgumentError() + self.assertEqual(err.message, "Provided value is not supported!") + self.assertTrue(err.is_fatal()) + self.assertFalse(err.is_retryable()) + + err2 = ArgumentError("Custom argument error message") + self.assertEqual(err2.message, "Custom argument error message") + + +if __name__ == "__main__": + unittest.main() diff --git a/python/tests/runners/test_default_runner.py b/python/tests/runners/test_default_runner.py new file mode 100644 index 00000000..d07830e0 --- /dev/null +++ b/python/tests/runners/test_default_runner.py @@ -0,0 +1,45 @@ +import json + +import pytest + +from beeai_framework.agents.runners.base import BeeRunnerToolInput +from beeai_framework.agents.runners.default.runner import DefaultRunner +from beeai_framework.agents.types import ( + BeeAgentExecutionConfig, + BeeInput, + BeeIterationResult, + BeeMeta, + BeeRunInput, + BeeRunOptions, +) +from beeai_framework.backend.chat import ChatModel +from beeai_framework.memory.token_memory import TokenMemory +from beeai_framework.tools.weather.openmeteo import OpenMeteoTool + + +@pytest.mark.asyncio +async def test_runner_init() -> None: + llm: ChatModel = await ChatModel.from_name("ollama:granite3.1-dense:8b") + + input = BeeInput( + llm=llm, + tools=[OpenMeteoTool()], + memory=TokenMemory(llm), + execution=BeeAgentExecutionConfig(max_iterations=10, max_retries_per_step=3, total_max_retries=10), + ) + runner = DefaultRunner( + input=input, options=BeeRunOptions(execution=input.execution, signal=None), run=None + ) # TODO Figure out run + + await runner.init(BeeRunInput(prompt="What is the current weather in White Plains?")) + + await runner.tool( + input=BeeRunnerToolInput( + state=BeeIterationResult( + tool_name="OpenMeteoTool", tool_input=json.dumps({"location_name": "White Plains"}) + ), + emitter=None, + meta=BeeMeta(iteration=0), + signal=None, + ) + ) diff --git a/python/tests/templates/test_templates.py b/python/tests/templates/test_templates.py new file mode 100644 index 00000000..00869c6e --- /dev/null +++ b/python/tests/templates/test_templates.py @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: Apache-2.0 + +from datetime import datetime +from zoneinfo import ZoneInfo + +import pytest +from pydantic import BaseModel, ValidationError + +from beeai_framework.utils.errors import PromptTemplateError +from beeai_framework.utils.templates import PromptTemplate + + +@pytest.fixture +def template() -> PromptTemplate: + class TestPromptInputSchema(BaseModel): + task: str + count: int + + template = PromptTemplate(schema=TestPromptInputSchema, template="""This is the task: {{task}}{{count}}""") + + return template + + +def test_render_valid(template: PromptTemplate) -> None: + assert template.render({"task": "Test", "count": 1}) == "This is the task: Test1" + + +def test_render_invalid_missing(template: PromptTemplate) -> None: + with pytest.raises(ValidationError): + template.render({"task": "Test"}) + + +def test_render_invalid_type(template: PromptTemplate) -> None: + with pytest.raises(ValidationError): + template.render({"task": 1, "count": 1}) + + +def 
test_render_function(template: PromptTemplate) -> None: + class TestPromptInputSchema(BaseModel): + task: str + + template = PromptTemplate( + schema=TestPromptInputSchema, + functions={"formatDate": lambda: datetime.now(ZoneInfo("US/Eastern")).strftime("%A, %B %d, %Y at %I:%M:%S %p")}, + template="""{{task}} {{formatDate}}""", + ) + + template.render(TestPromptInputSchema(task="Here is a task!")) + + +def test_render_function_clash(template: PromptTemplate) -> None: + class TestPromptInputSchema(BaseModel): + task: str + + template = PromptTemplate( + schema=TestPromptInputSchema, + functions={"task": lambda: "Clashing task!"}, + template="""{{task}}""", + ) + + with pytest.raises(PromptTemplateError): + template.render(TestPromptInputSchema(task="Here is a task!")) diff --git a/python/tests/tools/test_duckduckgo.py b/python/tests/tools/test_duckduckgo.py new file mode 100644 index 00000000..19261d4c --- /dev/null +++ b/python/tests/tools/test_duckduckgo.py @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from beeai_framework.tools import ToolInputValidationError +from beeai_framework.tools.search.duckduckgo import ( + DuckDuckGoSearchTool, + DuckDuckGoSearchToolInput, + DuckDuckGoSearchToolOutput, +) + + +@pytest.fixture +def tool() -> DuckDuckGoSearchTool: + return DuckDuckGoSearchTool() + + +def test_call_invalid_input_type(tool: DuckDuckGoSearchTool) -> None: + with pytest.raises(ToolInputValidationError): + tool.run(input={"search": "Poland"}) + + +def test_output(tool: DuckDuckGoSearchTool) -> None: + result = tool.run(input=DuckDuckGoSearchToolInput(query="What is the area of Poland?")) + assert type(result) is DuckDuckGoSearchToolOutput + assert "322,575" in result.get_text_content() diff --git a/python/tests/tools/test_mcp_tool.py b/python/tests/tools/test_mcp_tool.py new file mode 100644 index 00000000..d6631ef3 --- /dev/null +++ b/python/tests/tools/test_mcp_tool.py @@ -0,0 +1,132 @@ +# SPDX-License-Identifier: Apache-2.0 + +from unittest.mock import AsyncMock, MagicMock + +import pytest +from mcp.client.session import ClientSession +from mcp.types import CallToolResult, TextContent +from mcp.types import Tool as MCPToolInfo + +from beeai_framework.tools.mcp_tools import MCPTool, MCPToolOutput + + +# Common Fixtures +@pytest.fixture +def mock_client_session() -> AsyncMock: + return AsyncMock(spec=ClientSession) + + +# Basic Tool Test Fixtures +@pytest.fixture +def mock_tool_info() -> MCPToolInfo: + return MCPToolInfo( + name="test_tool", + description="A test tool", + inputSchema={}, + ) + + +@pytest.fixture +def call_tool_result() -> CallToolResult: + return CallToolResult( + output="test_output", + content=[ + { + "text": "test_content", + "type": "text", + } + ], + ) + + +# Calculator Tool Test Fixtures
@pytest.fixture +def add_numbers_tool_info() -> MCPToolInfo: + return MCPToolInfo( + name="add_numbers", + description="Adds two numbers together", + inputSchema={ + "type": "object", + "properties": {"a": {"type": "number"}, "b": {"type": "number"}}, + "required": ["a", "b"], + }, + ) + + +@pytest.fixture +def add_result() -> CallToolResult: + return CallToolResult( + output="8", + content=[TextContent(text="8", type="text")], + ) + + +# Basic Tool Tests +class TestMCPTool: + @pytest.mark.asyncio + async def test_mcp_tool_initialization(self, mock_client_session: ClientSession, mock_tool_info: MCPToolInfo) -> None: + tool = MCPTool(client=mock_client_session, tool=mock_tool_info) + + assert tool.name ==
"test_tool" + assert tool.description == "A test tool" + assert tool.input_schema() == {} + + @pytest.mark.asyncio + async def test_mcp_tool_run( + self, mock_client_session: ClientSession, mock_tool_info: Tool, call_tool_result: MCPToolOutput + ) -> None: + mock_client_session.call_tool = AsyncMock(return_value=call_tool_result) + tool = MCPTool(client=mock_client_session, tool=mock_tool_info) + input_data = {"key": "value"} + + result = await tool._run(input_data) + + mock_client_session.call_tool.assert_awaited_once_with(name="test_tool", arguments=input_data) + assert isinstance(result, MCPToolOutput) + assert result.result == call_tool_result + + @pytest.mark.asyncio + async def test_mcp_tool_from_client(self, mock_client_session: ClientSession, mock_tool_info: Tool) -> None: + tools_result = MagicMock() + tools_result.tools = [mock_tool_info] + mock_client_session.list_tools = AsyncMock(return_value=tools_result) + + tools = await MCPTool.from_client(mock_client_session) + + mock_client_session.list_tools.assert_awaited_once() + assert len(tools) == 1 + assert tools[0].name == "test_tool" + assert tools[0].description == "A test tool" + + +# Calculator Tool Tests +class TestAddNumbersTool: + @pytest.mark.asyncio + async def test_add_numbers_mcp( + self, mock_client_session: ClientSession, add_numbers_tool_info: MCPToolInfo, add_result: Callable + ) -> None: + mock_client_session.call_tool = AsyncMock(return_value=add_result) + tool = MCPTool(client=mock_client_session, tool=add_numbers_tool_info) + input_data = {"a": 5, "b": 3} + + result = await tool._run(input_data) + + mock_client_session.call_tool.assert_awaited_once_with(name="add_numbers", arguments=input_data) + assert isinstance(result, MCPToolOutput) + assert result.result.output == "8" + assert result.result.content[0].text == "8" + + @pytest.mark.asyncio + async def test_add_numbers_from_client( + self, mock_client_session: ClientSession, add_numbers_tool_info: MCPToolInfo + ) -> None: + tools_result = MagicMock() + tools_result.tools = [add_numbers_tool_info] + mock_client_session.list_tools = AsyncMock(return_value=tools_result) + + tools = await MCPTool.from_client(mock_client_session) + + mock_client_session.list_tools.assert_awaited_once() + assert len(tools) == 1 + assert tools[0].name == "add_numbers" + assert "adds two numbers" in tools[0].description.lower() diff --git a/python/tests/tools/test_opemmeteo.py b/python/tests/tools/test_opemmeteo.py new file mode 100644 index 00000000..1e98dcbc --- /dev/null +++ b/python/tests/tools/test_opemmeteo.py @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from beeai_framework.tools import ToolInputValidationError +from beeai_framework.tools.tool import StringToolOutput +from beeai_framework.tools.weather.openmeteo import OpenMeteoTool, OpenMeteoToolInput + + +@pytest.fixture +def tool() -> OpenMeteoTool: + return OpenMeteoTool() + + +def test_call_model(tool: OpenMeteoTool) -> None: + tool.run( + input=OpenMeteoToolInput( + location_name="Cambridge", + country="US", + temperature_unit="fahrenheit", + ) + ) + + +def test_call_dict(tool: OpenMeteoTool) -> None: + tool.run(input={"location_name": "White Plains"}) + + +def test_call_invalid_missing_field(tool: OpenMeteoTool) -> None: + with pytest.raises(ToolInputValidationError): + tool.run(input={}) + + +def test_call_invalid_bad_type(tool: OpenMeteoTool) -> None: + with pytest.raises(ToolInputValidationError): + tool.run(input={"location_name": 1}) + + +def test_output(tool: OpenMeteoTool) -> None: 
+ result = tool.run(input={"location_name": "White Plains"}) + assert type(result) is StringToolOutput + assert "current" in result.get_text_content() + + +def test_bad_start_date_format(tool: OpenMeteoTool) -> None: + with pytest.raises(ToolInputValidationError): + tool.run(input=OpenMeteoToolInput(location_name="White Plains", start_date="2025:01:01", end_date="2025:01:02")) + + +def test_bad_end_date_format(tool: OpenMeteoTool) -> None: + with pytest.raises(ToolInputValidationError): + tool.run(input=OpenMeteoToolInput(location_name="White Plains", start_date="2025-01-01", end_date="2025:01:02")) + + +def test_bad_dates(tool: OpenMeteoTool) -> None: + with pytest.raises(ToolInputValidationError): + tool.run(input=OpenMeteoToolInput(location_name="White Plains", start_date="2025-02-02", end_date="2025-02-01")) diff --git a/python/tests/utils/test_custom_logger.py b/python/tests/utils/test_custom_logger.py new file mode 100644 index 00000000..ac266858 --- /dev/null +++ b/python/tests/utils/test_custom_logger.py @@ -0,0 +1,19 @@ +import logging + +from beeai_framework.backend import Role +from beeai_framework.utils import BeeLogger, MessageEvent + + +def test_redefine_logging_methods() -> None: + logger = BeeLogger("app", level=logging.DEBUG) + logger.add_logging_level("TEST1", 1, "test") # adds test log level + logger.add_logging_level("TEST2", 2, "test") # does not redefine test log level + logger.add_logging_level("INFO", logging.INFO) # does not redefine info log level + assert callable(logger.test) + + +def test_log_events() -> None: + logger = BeeLogger("app") + event = MessageEvent(source=Role.USER, message="Test") + logger.log_message_events(event) + logger.info("Test", extra={"is_event_message": False}) diff --git a/python/tests/workflows/multi_agents.py b/python/tests/workflows/multi_agents.py new file mode 100644 index 00000000..a564d306 --- /dev/null +++ b/python/tests/workflows/multi_agents.py @@ -0,0 +1,50 @@ +import pytest + +from beeai_framework.adapters.ollama.backend.chat import OllamaChatModel +from beeai_framework.agents.bee import BeeAgent +from beeai_framework.agents.types import BeeInput +from beeai_framework.backend.message import UserMessage +from beeai_framework.memory import TokenMemory, UnconstrainedMemory +from beeai_framework.workflows.agent import AgentFactoryInput, AgentWorkflow + + +@pytest.mark.asyncio +async def test_multi_agents_workflow_basic() -> None: + llm = OllamaChatModel() + + workflow: AgentWorkflow = AgentWorkflow() + workflow.add_agent(agent=AgentFactoryInput(name="Translator assistant", tools=[], llm=llm)) + + memory = UnconstrainedMemory() + await memory.add(UserMessage(content="Say Hello in German.")) + response = await workflow.run(memory.messages) + print(response.state) + assert "Hallo" in response.state.final_answer + + +@pytest.mark.asyncio +async def test_multi_agents_workflow_creation() -> None: + llm = OllamaChatModel() + + workflow: AgentWorkflow = AgentWorkflow() + workflow.add_agent(BeeAgent(BeeInput(llm=llm, tools=[], memory=TokenMemory(llm)))) + workflow.add_agent(agent=lambda memory: BeeAgent(BeeInput(llm=llm, tools=[], memory=memory))) + + assert len(workflow.workflow.step_names) == 2 + + memory = UnconstrainedMemory() + await memory.add(UserMessage(content="Say Hello in Italian.")) + response = await workflow.run(memory.messages) + assert "Ciao" in response.state.final_answer + + +@pytest.mark.asyncio +async def test_multi_agents_workflow_agent_delete() -> None: + llm = OllamaChatModel() + + workflow: AgentWorkflow = 
AgentWorkflow() + workflow.add_agent(BeeAgent(BeeInput(llm=llm, tools=[], memory=UnconstrainedMemory()))) + workflow.del_agent("BeeAI") + workflow.add_agent(BeeAgent(BeeInput(llm=llm, tools=[], memory=UnconstrainedMemory()))) + + assert len(workflow.workflow.step_names) == 1 diff --git a/python/tests/workflows/test_workflow.py b/python/tests/workflows/test_workflow.py new file mode 100644 index 00000000..4d877d65 --- /dev/null +++ b/python/tests/workflows/test_workflow.py @@ -0,0 +1,139 @@ +from unittest.mock import AsyncMock + +import pytest +from pydantic import BaseModel, ValidationError + +from beeai_framework.workflows import Workflow + + +@pytest.mark.asyncio +async def test_workflow_basic() -> None: + # State + class State(BaseModel): + input: str + hops: int + output: str | None = None + + # Steps + def first(state: State) -> str: + print("Running first step!", state) + if state.hops > 0: + state.hops -= 1 + return Workflow.SELF + else: + return "second" + + def second(state: State) -> str: + print("Running second step!", state) + state.output = f"There are {state.hops} hops remaining!" + return Workflow.NEXT + + def third(state: State) -> str: + print("Running third step!", state) + return Workflow.END + + workflow: Workflow = Workflow(schema=State) + workflow.add_step("first", first) + workflow.add_step("second", second) + workflow.add_step("third", third) + + response = await workflow.run(State(input="Hello there!", hops=10)) + print(response.state) + assert response.state.hops == 0 + assert response.state.output == "There are 0 hops remaining!" + + +@pytest.mark.asyncio +async def test_workflow_validation() -> None: + # State + class State(BaseModel): + input: str + hops: int + output: str | None = None + + # Steps + def first(state: State) -> str: + print("Running first step!", state) + if state.hops > 0: + state.hops -= 1 + return Workflow.SELF + else: + return "second" + + def second(state: State) -> str: + print("Running second step!", state) + # Introduce schema error here + state.output = f"There are {state.hops} hops remaining!" + state.hops = "wrong type" # type: ignore + return Workflow.NEXT + + def third(state: State) -> str: + print("Running third step!", state) + return Workflow.END + + workflow: Workflow = Workflow(schema=State) + workflow.add_step("first", first) + workflow.add_step("second", second) + workflow.add_step("third", third) + + with pytest.raises(ValidationError): + await workflow.run(State(input="Hello there!", hops=10)) + + +@pytest.mark.asyncio +async def test_workflow_step_delete() -> None: + # State + class State(BaseModel): + output: str | None = None + + # Steps + def first(state: State) -> None: + print("Running first step!", state) + + def second(state: State) -> None: + print("Running second step!", state) + state.output = "This is the output!" 
+ + def third(state: State) -> None: + print("Running third step!", state) + + workflow: Workflow = Workflow(schema=State) + workflow.add_step("first", first) + workflow.add_step("second", second) + workflow.add_step("third", third) + + # Delete second step, output will not be set + workflow.delete_step("second") + + response = await workflow.run(State()) + + assert len(workflow.steps) == 2 + assert response.state.output is None + + +@pytest.mark.asyncio +async def test_workflow_async_steps() -> None: + mock_func = AsyncMock(return_value="Mocked data") + + # State + class State(BaseModel): + output: str | None = None + + # Steps + async def first(state: State) -> None: + print("Running first step!", state) + state.output = await mock_func() + + def second(state: State) -> None: + print("Running second step!", state) + + async def third(state: State) -> None: + print("Running third step!", state) + + workflow: Workflow = Workflow(schema=State) + workflow.add_step("first", first) + workflow.add_step("second", second) + workflow.add_step("third", third) + + response = await workflow.run(State()) + assert response.state.output == "Mocked data"