diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-opea/.gitignore b/llama-index-integrations/embeddings/llama-index-embeddings-opea/.gitignore new file mode 100644 index 0000000000000..990c18de22908 --- /dev/null +++ b/llama-index-integrations/embeddings/llama-index-embeddings-opea/.gitignore @@ -0,0 +1,153 @@ +llama_index/_static +.DS_Store +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +bin/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +etc/ +include/ +lib/ +lib64/ +parts/ +sdist/ +share/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +.ruff_cache + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints +notebooks/ + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +pyvenv.cfg + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# Jetbrains +.idea +modules/ +*.swp + +# VsCode +.vscode + +# pipenv +Pipfile +Pipfile.lock + +# pyright +pyrightconfig.json diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-opea/BUILD b/llama-index-integrations/embeddings/llama-index-embeddings-opea/BUILD new file mode 100644 index 0000000000000..0896ca890d8bf --- /dev/null +++ b/llama-index-integrations/embeddings/llama-index-embeddings-opea/BUILD @@ -0,0 +1,3 @@ +poetry_requirements( + name="poetry", +) diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-opea/Makefile b/llama-index-integrations/embeddings/llama-index-embeddings-opea/Makefile new file mode 100644 index 0000000000000..ab42976aaedf1 --- /dev/null +++ b/llama-index-integrations/embeddings/llama-index-embeddings-opea/Makefile @@ -0,0 +1,16 @@ +GIT_ROOT ?= $(shell git rev-parse --show-toplevel) + +help: ## Show all Makefile targets. + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}' + +format: ## Run code autoformatters (black). + git ls-files | black + +lint: ## Run linters: pre-commit (black, ruff, codespell) and mypy + pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files + +test: ## Run tests via pytest. + pytest tests + +watch-docs: ## Build and watch documentation. 
+ sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/ diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-opea/README.md b/llama-index-integrations/embeddings/llama-index-embeddings-opea/README.md new file mode 100644 index 0000000000000..2325884b393a8 --- /dev/null +++ b/llama-index-integrations/embeddings/llama-index-embeddings-opea/README.md @@ -0,0 +1,29 @@ +# LlamaIndex Embeddings Integration: OPEA Embeddings + +OPEA (Open Platform for Enterprise AI) is a platform for building, deploying, and scaling AI applications. As part of this platform, many core gen-ai components are available for deployment as microservices, including LLMs. + +Visit [https://opea.dev](https://opea.dev) for more information, and their [GitHub](https://github.com/opea-project/GenAIComps) for the source code of the OPEA components. + +## Installation + +1. Install the required Python packages: + +```bash +%pip install llama-index-embeddings-opea +``` + +## Usage + +```python +from llama_index.embeddings.opea import OPEAEmbedding + +embed_model = OPEAEmbedding( + model_name="", + api_base="http://localhost:8080/v1", + embed_batch_size=10, +) + +embeddings = embed_model.get_text_embedding("text") + +embeddings = embed_model.get_text_embedding_batch(["text1", "text2"]) +``` diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-opea/llama_index/embeddings/opea/BUILD b/llama-index-integrations/embeddings/llama-index-embeddings-opea/llama_index/embeddings/opea/BUILD new file mode 100644 index 0000000000000..db46e8d6c978c --- /dev/null +++ b/llama-index-integrations/embeddings/llama-index-embeddings-opea/llama_index/embeddings/opea/BUILD @@ -0,0 +1 @@ +python_sources() diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-opea/llama_index/embeddings/opea/__init__.py b/llama-index-integrations/embeddings/llama-index-embeddings-opea/llama_index/embeddings/opea/__init__.py new file mode 100644 index 
0000000000000..596f9ae6af280 --- /dev/null +++ b/llama-index-integrations/embeddings/llama-index-embeddings-opea/llama_index/embeddings/opea/__init__.py @@ -0,0 +1,3 @@ +from llama_index.embeddings.opea.base import OPEAEmbedding + +__all__ = ["OPEAEmbedding"] diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-opea/llama_index/embeddings/opea/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-opea/llama_index/embeddings/opea/base.py new file mode 100644 index 0000000000000..c29a1a05e1f3a --- /dev/null +++ b/llama-index-integrations/embeddings/llama-index-embeddings-opea/llama_index/embeddings/opea/base.py @@ -0,0 +1,67 @@ +import httpx +from typing import Any, Dict, Optional + +from llama_index.core.base.embeddings.base import ( + DEFAULT_EMBED_BATCH_SIZE, +) +from llama_index.core.callbacks import CallbackManager +from llama_index.embeddings.openai import OpenAIEmbedding + + +class OPEAEmbedding(OpenAIEmbedding): + """ + OPEA class for embeddings. + + Args: + model_name (str): Model for embedding. + api_base (str): The base URL for OPEA Embeddings microservice. + additional_kwargs (Dict[str, Any]): Additional kwargs for the OpenAI API. 
+ + Examples: + `pip install llama-index-embeddings-opea` + + ```python + from llama_index.embeddings.opea import OPEAEmbedding + + embed_model = OPEAEmbedding( + model_name="...", + api_base="http://localhost:8080", + ) + ``` + """ + + def __init__( + self, + model_name: str, + api_base: str, + dimensions: Optional[int] = None, + embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, + additional_kwargs: Optional[Dict[str, Any]] = None, + max_retries: int = 10, + timeout: float = 60.0, + reuse_client: bool = True, + callback_manager: Optional[CallbackManager] = None, + default_headers: Optional[Dict[str, str]] = None, + http_client: Optional[httpx.Client] = None, + api_key: Optional[str] = "fake", + **kwargs: Any, + ) -> None: + super().__init__( + model_name=model_name, + dimensions=dimensions, + embed_batch_size=embed_batch_size, + additional_kwargs=additional_kwargs, + api_key=api_key, + api_base=api_base, + max_retries=max_retries, + timeout=timeout, + reuse_client=reuse_client, + callback_manager=callback_manager, + default_headers=default_headers, + http_client=http_client, + **kwargs, + ) + + @classmethod + def class_name(cls) -> str: + return "OPEAEmbedding" diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-opea/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-opea/pyproject.toml new file mode 100644 index 0000000000000..8ac52c6a98f9e --- /dev/null +++ b/llama-index-integrations/embeddings/llama-index-embeddings-opea/pyproject.toml @@ -0,0 +1,63 @@ +[build-system] +build-backend = "poetry.core.masonry.api" +requires = ["poetry-core"] + +[tool.codespell] +check-filenames = true +check-hidden = true +skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb" + +[tool.llamahub] +contains_example = false +import_path = "llama_index.embeddings.opea" + +[tool.llamahub.class_authors] +OPEAEmbedding = "llama-index" + +[tool.mypy] +disallow_untyped_defs = true +exclude = ["_static", "build", "examples", "notebooks", "venv"] 
+ignore_missing_imports = true +python_version = "3.8" + +[tool.poetry] +authors = ["logan-markewich"] +description = "llama-index embeddings opea integration" +exclude = ["**/BUILD"] +license = "MIT" +name = "llama-index-embeddings-opea" +readme = "README.md" +version = "0.1.0" + +[tool.poetry.dependencies] +python = ">=3.8.1,<4.0" +llama-index-embeddings-openai = "^0.2.0" +llama-index-core = "^0.11.0" + +[tool.poetry.group.dev.dependencies] +ipython = "8.10.0" +jupyter = "^1.0.0" +mypy = "0.991" +pre-commit = "3.2.0" +pylint = "2.15.10" +pytest = "7.2.1" +pytest-mock = "3.11.1" +ruff = "0.0.292" +tree-sitter-languages = "^1.8.0" +types-Deprecated = ">=0.1.0" +types-PyYAML = "^6.0.12.12" +types-protobuf = "^4.24.0.4" +types-redis = "4.5.5.0" +types-requests = "2.28.11.8" +types-setuptools = "67.1.0.0" + +[tool.poetry.group.dev.dependencies.black] +extras = ["jupyter"] +version = "<=23.9.1,>=23.7.0" + +[tool.poetry.group.dev.dependencies.codespell] +extras = ["toml"] +version = ">=v2.2.6" + +[[tool.poetry.packages]] +include = "llama_index/" diff --git a/llama-index-integrations/llms/llama-index-llms-opea/.gitignore b/llama-index-integrations/llms/llama-index-llms-opea/.gitignore new file mode 100644 index 0000000000000..990c18de22908 --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-opea/.gitignore @@ -0,0 +1,153 @@ +llama_index/_static +.DS_Store +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +bin/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +etc/ +include/ +lib/ +lib64/ +parts/ +sdist/ +share/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +.ruff_cache + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints +notebooks/ + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +pyvenv.cfg + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# Jetbrains +.idea +modules/ +*.swp + +# VsCode +.vscode + +# pipenv +Pipfile +Pipfile.lock + +# pyright +pyrightconfig.json diff --git a/llama-index-integrations/llms/llama-index-llms-opea/BUILD b/llama-index-integrations/llms/llama-index-llms-opea/BUILD new file mode 100644 index 0000000000000..0896ca890d8bf --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-opea/BUILD @@ -0,0 +1,3 @@ +poetry_requirements( + name="poetry", +) diff --git a/llama-index-integrations/llms/llama-index-llms-opea/Makefile b/llama-index-integrations/llms/llama-index-llms-opea/Makefile new file mode 100644 index 0000000000000..b9eab05aa3706 --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-opea/Makefile @@ -0,0 +1,17 @@ +GIT_ROOT ?= $(shell git rev-parse --show-toplevel) + +help: ## Show all Makefile targets. + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}' + +format: ## Run code autoformatters (black). + pre-commit install + git ls-files | xargs pre-commit run black --files + +lint: ## Run linters: pre-commit (black, ruff, codespell) and mypy + pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files + +test: ## Run tests via pytest. + pytest tests + +watch-docs: ## Build and watch documentation. 
+ sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/ diff --git a/llama-index-integrations/llms/llama-index-llms-opea/README.md b/llama-index-integrations/llms/llama-index-llms-opea/README.md new file mode 100644 index 0000000000000..048fc495d8935 --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-opea/README.md @@ -0,0 +1,48 @@ +# LlamaIndex Llms Integration: OPEA LLM + +OPEA (Open Platform for Enterprise AI) is a platform for building, deploying, and scaling AI applications. As part of this platform, many core gen-ai components are available for deployment as microservices, including LLMs. + +Visit [https://opea.dev](https://opea.dev) for more information, and their [GitHub](https://github.com/opea-project/GenAIComps) for the source code of the OPEA components. + +## Installation + +1. Install the required Python packages: + +```bash +%pip install llama-index-llms-opea +``` + +## Usage + +```python +from llama_index.core.llms import ChatMessage +from llama_index.llms.opea import OPEA + +llm = OPEA( + model="meta-llama/Meta-Llama-3.1-8B-Instruct", + api_base="http://localhost:8080/v1", + temperature=0.7, + max_tokens=256, + additional_kwargs={"top_p": 0.95}, +) + +# Complete a prompt +response = llm.complete("What is the capital of France?") +print(response) + +# Stream a chat response +response = llm.stream_chat( + [ChatMessage(role="user", content="What is the capital of France?")] +) +for chunk in response: + print(chunk.delta, end="", flush=True) +``` + +All available methods include: + +- `complete()` +- `stream_complete()` +- `chat()` +- `stream_chat()` + +as well as async versions of the methods with the `a` prefix. 
diff --git a/llama-index-integrations/llms/llama-index-llms-opea/llama_index/llms/opea/BUILD b/llama-index-integrations/llms/llama-index-llms-opea/llama_index/llms/opea/BUILD new file mode 100644 index 0000000000000..db46e8d6c978c --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-opea/llama_index/llms/opea/BUILD @@ -0,0 +1 @@ +python_sources() diff --git a/llama-index-integrations/llms/llama-index-llms-opea/llama_index/llms/opea/__init__.py b/llama-index-integrations/llms/llama-index-llms-opea/llama_index/llms/opea/__init__.py new file mode 100644 index 0000000000000..41b17f509b393 --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-opea/llama_index/llms/opea/__init__.py @@ -0,0 +1,3 @@ +from llama_index.llms.opea.base import OPEA + +__all__ = ["OPEA"] diff --git a/llama-index-integrations/llms/llama-index-llms-opea/llama_index/llms/opea/base.py b/llama-index-integrations/llms/llama-index-llms-opea/llama_index/llms/opea/base.py new file mode 100644 index 0000000000000..da16a5edef0db --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-opea/llama_index/llms/opea/base.py @@ -0,0 +1,31 @@ +from llama_index.core.base.llms.types import ( + LLMMetadata, +) +from llama_index.core.bridge.pydantic import Field +from llama_index.llms.openai_like.base import OpenAILike + + +class OPEA(OpenAILike): +    """Adapter for an OPEA LLM. 
+ + Examples: + `pip install llama-index-llms-opea` + + ```python + from llama_index.llms.opea import OPEA + + llm = OPEA( + model="meta-llama/Meta-Llama-3.1-8B-Instruct", + api_base="http://localhost:8080/v1", + ) + ``` + """ + + is_chat_model: bool = Field( + default=True, + description=LLMMetadata.model_fields["is_chat_model"].description, + ) + + @classmethod + def class_name(cls) -> str: + return "OPEA" diff --git a/llama-index-integrations/llms/llama-index-llms-opea/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-opea/pyproject.toml new file mode 100644 index 0000000000000..34e657fea5dad --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-opea/pyproject.toml @@ -0,0 +1,63 @@ +[build-system] +build-backend = "poetry.core.masonry.api" +requires = ["poetry-core"] + +[tool.codespell] +check-filenames = true +check-hidden = true +skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb" + +[tool.llamahub] +contains_example = false +import_path = "llama_index.llms.opea" + +[tool.llamahub.class_authors] +OPEA = "llama-index" + +[tool.mypy] +disallow_untyped_defs = true +exclude = ["_static", "build", "examples", "notebooks", "venv"] +ignore_missing_imports = true +python_version = "3.8" + +[tool.poetry] +authors = ["Logan Markewich "] +description = "llama-index llms opea integration" +exclude = ["**/BUILD"] +license = "MIT" +name = "llama-index-llms-opea" +readme = "README.md" +version = "0.1.0" + +[tool.poetry.dependencies] +python = ">=3.8.1,<4.0" +llama-index-core = "^0.11.0" +llama-index-llms-openai-like = "^0.2.0" + +[tool.poetry.group.dev.dependencies] +ipython = "8.10.0" +jupyter = "^1.0.0" +mypy = "0.991" +pre-commit = "3.2.0" +pylint = "2.15.10" +pytest = "7.2.1" +pytest-mock = "3.11.1" +ruff = "0.0.292" +tree-sitter-languages = "^1.8.0" +types-Deprecated = ">=0.1.0" +types-PyYAML = "^6.0.12.12" +types-protobuf = "^4.24.0.4" +types-redis = "4.5.5.0" +types-requests = "2.28.11.8" +types-setuptools = "67.1.0.0" + 
+[tool.poetry.group.dev.dependencies.black] +extras = ["jupyter"] +version = "<=23.9.1,>=23.7.0" + +[tool.poetry.group.dev.dependencies.codespell] +extras = ["toml"] +version = ">=v2.2.6" + +[[tool.poetry.packages]] +include = "llama_index/" diff --git a/llama-index-integrations/llms/llama-index-llms-opea/tests/BUILD b/llama-index-integrations/llms/llama-index-llms-opea/tests/BUILD new file mode 100644 index 0000000000000..dabf212d7e716 --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-opea/tests/BUILD @@ -0,0 +1 @@ +python_tests() diff --git a/llama-index-integrations/llms/llama-index-llms-opea/tests/__init__.py b/llama-index-integrations/llms/llama-index-llms-opea/tests/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/llama-index-integrations/llms/llama-index-llms-opea/tests/test_llms_opea.py b/llama-index-integrations/llms/llama-index-llms-opea/tests/test_llms_opea.py new file mode 100644 index 0000000000000..90ce5449c6dff --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-opea/tests/test_llms_opea.py @@ -0,0 +1,7 @@ +from llama_index.core.base.llms.base import BaseLLM +from llama_index.llms.opea import OPEA + + +def test_embedding_class(): + names_of_base_classes = [b.__name__ for b in OPEA.__mro__] + assert BaseLLM.__name__ in names_of_base_classes