Skip to content

Commit

Permalink
[WIP] Add support for OPEA LLMs in Llama-Index (#16666)
Browse files Browse the repository at this point in the history
  • Loading branch information
logan-markewich authored Jan 15, 2025
1 parent 3f7e66e commit f8f3621
Show file tree
Hide file tree
Showing 19 changed files with 662 additions and 0 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,153 @@
llama_index/_static
.DS_Store
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
bin/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
etc/
include/
lib/
lib64/
parts/
sdist/
share/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
.ruff_cache

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints
notebooks/

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
pyvenv.cfg

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# Jetbrains
.idea
modules/
*.swp

# VsCode
.vscode

# pipenv
Pipfile
Pipfile.lock

# pyright
pyrightconfig.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
# Pants build metadata: expose the Poetry-managed third-party requirements
# from pyproject.toml as a target named "poetry".
poetry_requirements(
    name="poetry",
)
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
GIT_ROOT ?= $(shell git rev-parse --show-toplevel)

help: ## Show all Makefile targets.
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}'

format: ## Run code autoformatters (black).
	# black takes file paths as arguments, not on stdin, so the ls-files
	# output must go through xargs (as the lint target below already does).
	# Restrict to *.py so black is not handed non-Python files.
	git ls-files '*.py' | xargs black

lint: ## Run linters: pre-commit (black, ruff, codespell) and mypy
	pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files

test: ## Run tests via pytest.
	pytest tests

watch-docs: ## Build and watch documentation.
	sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# LlamaIndex Embeddings Integration: OPEA Embeddings

OPEA (Open Platform for Enterprise AI) is a platform for building, deploying, and scaling AI applications. As part of this platform, many core gen-ai components are available for deployment as microservices, including embedding models and LLMs.

Visit [https://opea.dev](https://opea.dev) for more information, and their [GitHub](https://github.com/opea-project/GenAIComps) for the source code of the OPEA components.

## Installation

1. Install the required Python packages:

```bash
pip install llama-index-embeddings-opea
```

## Usage

```python
from llama_index.embeddings.opea import OPEAEmbedding

embed_model = OPEAEmbedding(
model_name="<model_name>",
api_base="http://localhost:8080/v1",
embed_batch_size=10,
)

embeddings = embed_model.get_text_embedding("text")

embeddings = embed_model.get_text_embedding_batch(["text1", "text2"])
```
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Pants build metadata: register the Python source files in this directory.
python_sources()
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
"""OPEA embeddings integration for LlamaIndex.

Re-exports :class:`OPEAEmbedding` as the package's public API.
"""

from llama_index.embeddings.opea.base import OPEAEmbedding

__all__ = ["OPEAEmbedding"]
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
import httpx
from typing import Any, Dict, Optional

from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
)
from llama_index.core.callbacks import CallbackManager
from llama_index.embeddings.openai import OpenAIEmbedding


class OPEAEmbedding(OpenAIEmbedding):
    """
    OPEA class for embeddings.

    OPEA embedding microservices expose an OpenAI-compatible API, so this class
    is a thin wrapper over ``OpenAIEmbedding`` that points the client at an
    OPEA endpoint and defaults the API key to a placeholder value ("fake"),
    since the microservice does not require a real key.

    Args:
        model_name (str): Name of the embedding model served by the OPEA microservice.
        api_base (str): The base URL for OPEA Embeddings microservice.
        additional_kwargs (Dict[str, Any]): Additional kwargs for the OpenAI API.

    Examples:
        `pip install llama-index-embeddings-opea`

        ```python
        from llama_index.embeddings.opea import OPEAEmbedding

        embed_model = OPEAEmbedding(
            model_name="...",
            api_base="http://localhost:8080",
        )
        ```
    """

    def __init__(
        self,
        model_name: str,
        api_base: str,
        dimensions: Optional[int] = None,
        embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
        additional_kwargs: Optional[Dict[str, Any]] = None,
        max_retries: int = 10,
        timeout: float = 60.0,
        reuse_client: bool = True,
        callback_manager: Optional[CallbackManager] = None,
        default_headers: Optional[Dict[str, str]] = None,
        http_client: Optional[httpx.Client] = None,
        # OPEA microservices ignore auth; a non-empty placeholder satisfies the
        # OpenAI client, which rejects a missing key.
        api_key: Optional[str] = "fake",
        **kwargs: Any,
    ) -> None:
        # Delegate everything to OpenAIEmbedding; only the endpoint
        # (api_base) and the placeholder api_key differ from stock usage.
        super().__init__(
            model_name=model_name,
            dimensions=dimensions,
            embed_batch_size=embed_batch_size,
            additional_kwargs=additional_kwargs,
            api_key=api_key,
            api_base=api_base,
            max_retries=max_retries,
            timeout=timeout,
            reuse_client=reuse_client,
            callback_manager=callback_manager,
            default_headers=default_headers,
            http_client=http_client,
            **kwargs,
        )

    @classmethod
    def class_name(cls) -> str:
        # Stable identifier used by LlamaIndex serialization/registry.
        return "OPEAEmbedding"
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
[build-system]
build-backend = "poetry.core.masonry.api"
requires = ["poetry-core"]

[tool.codespell]
check-filenames = true
check-hidden = true
skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb"

[tool.llamahub]
contains_example = false
import_path = "llama_index.embeddings.opea"

[tool.llamahub.class_authors]
OPEAEmbedding = "llama-index"

[tool.mypy]
disallow_untyped_defs = true
exclude = ["_static", "build", "examples", "notebooks", "venv"]
ignore_missing_imports = true
python_version = "3.8"

[tool.poetry]
authors = ["logan-markewich"]
description = "llama-index embeddings opea integration"
exclude = ["**/BUILD"]
license = "MIT"
name = "llama-index-embeddings-opea"
readme = "README.md"
version = "0.1.0"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
# base.py imports `llama_index.embeddings.openai`, so this package must depend
# on the embeddings integration (not `llama-index-llms-openai`, which does not
# provide that module).
llama-index-embeddings-openai = "^0.2.0"
llama-index-core = "^0.11.0"

[tool.poetry.group.dev.dependencies]
ipython = "8.10.0"
jupyter = "^1.0.0"
mypy = "0.991"
pre-commit = "3.2.0"
pylint = "2.15.10"
pytest = "7.2.1"
pytest-mock = "3.11.1"
ruff = "0.0.292"
tree-sitter-languages = "^1.8.0"
types-Deprecated = ">=0.1.0"
types-PyYAML = "^6.0.12.12"
types-protobuf = "^4.24.0.4"
types-redis = "4.5.5.0"
types-requests = "2.28.11.8"
types-setuptools = "67.1.0.0"

[tool.poetry.group.dev.dependencies.black]
extras = ["jupyter"]
version = "<=23.9.1,>=23.7.0"

[tool.poetry.group.dev.dependencies.codespell]
extras = ["toml"]
version = ">=v2.2.6"

[[tool.poetry.packages]]
include = "llama_index/"
Loading

0 comments on commit f8f3621

Please sign in to comment.