diff --git a/README.md b/README.md index 0802195..54e3c18 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,16 @@ Our goal is to make LLMs easier to use for developers, turning complex AI capabi ### Pre-requisite - Python >= 3.9 +- Environment variable `OPENAI_API_KEY` or other LLM API keys, for more details, see [here](https://nerif-ai.com/docs/nerif-environment) +- Set default model and embedding model with `NERIF_DEFAULT_LLM_MODEL` and `NERIF_DEFAULT_EMBEDDING_MODEL`, for more details, see [here](https://nerif-ai.com/docs/nerif-environment) + +Example: + +```bash +export OPENAI_API_KEY=your_api_key +export NERIF_DEFAULT_LLM_MODEL=gpt-4o +export NERIF_DEFAULT_EMBEDDING_MODEL=text-embedding-3-small +``` ### Install @@ -33,6 +43,7 @@ from nerif.core import nerif from nerif.model import SimpleChatModel model = SimpleChatModel() +# Default model is `gpt-4o` # Use nerif judge "natural language statement" if nerif("the sky is blue"): diff --git a/docs/setup_guide.md b/docs/setup_guide.md deleted file mode 100644 index 579bec7..0000000 --- a/docs/setup_guide.md +++ /dev/null @@ -1,18 +0,0 @@ -# Guide for setting environment up for development - - - locate to the root directory - - create a new python environment - - `python -m venv venv; ./venv/Scripts/activate` - - run `pip install -e .` in command lines - - If you want to setup the environment for api keys instead of manually put it every single time, set up the environmental variables with your open ai api keys - - in windows you could do `setx OPENAI_API_KEY sk-xxxxxxxxx` - - in macos/linux you could do `export OPENAI_API_KEY=sk-xxxxxxxxx` - - The same goes for open router `OPENROUTER_API_KEY` - - to test if everything is working, do the following: - - go to interactive command lines in python `python` - ``` - from nerif import SimpleChatAgent - agent = SimpleChatAgent() - agent.chat("Hello world!") - >> 'Hello! How can I assist you today?' 
- ``` \ No newline at end of file diff --git a/nerif/model/__init__.py b/nerif/model/__init__.py index c22fc31..6a8d367 100644 --- a/nerif/model/__init__.py +++ b/nerif/model/__init__.py @@ -1,3 +1,4 @@ +from .audio_model import AudioModel, SpeechModel from .model import LogitsChatModel, SimpleChatModel, SimpleEmbeddingModel, VisionModel __all__ = [ @@ -5,4 +6,6 @@ "SimpleChatModel", "SimpleEmbeddingModel", "VisionModel", + "AudioModel", + "SpeechModel", ] diff --git a/nerif/model/audio_model.py b/nerif/model/audio_model.py new file mode 100644 index 0000000..bd235ed --- /dev/null +++ b/nerif/model/audio_model.py @@ -0,0 +1,37 @@ +from pathlib import Path + +from openai import OpenAI + + +class AudioModel: + """ + A simple agent for audio tasks. (audio transcription, speech to text) + """ + + def __init__(self): + self.client = OpenAI() + + def transcribe(self, file: Path): + with open(file, "rb") as audio_file: + transcription = self.client.audio.transcriptions.create( + model="whisper-1", + file=audio_file, + ) + return transcription + + +class SpeechModel: + """ + A simple agent for speech tasks. 
(speech model, text to speech) + """ + + def __init__(self): + self.client = OpenAI() + + def text_to_speech(self, text: str, voice: str = "alloy"): + response = self.client.audio.speech.create( + model="tts-1", + input=text, + voice=voice, + ) + return response diff --git a/nerif/utils/utils.py b/nerif/utils/utils.py index 0cf8cfb..51efdd5 100644 --- a/nerif/utils/utils.py +++ b/nerif/utils/utils.py @@ -185,7 +185,13 @@ def get_litellm_response( "messages": messages, } else: - raise ValueError(f"Model {model} not supported") + # default method: use openai style + kargs = { + "model": model, + "messages": messages, + "api_key": api_key, + "base_url": base_url, + } kargs["stream"] = stream kargs["temperature"] = temperature