diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..494f68a --- /dev/null +++ b/.gitignore @@ -0,0 +1,165 @@ +chat.json +.git.old/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 
+# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control +.pdm.toml +.pdm-python +.pdm-build/ + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/README.md b/README.md new file mode 100644 index 0000000..602ff19 --- /dev/null +++ b/README.md @@ -0,0 +1,53 @@ +# Ploppie + +A high-level, stupid-simple Pythonic LiteLLM abstraction layer for implementing simple chat workflows, with tools. Supports vision and audio models. Includes facilities for easy (de)serialization of chat histories. + +So stupid that I couldn't come up with a better name. 
+ +## Installation + +```bash +pip install ploppie +``` + +## Usage + +### Simple chat +```python +from ploppie import Chat + +chat = Chat() + +response = chat.system("You are a helpful assistant.") \ + .user("What is the capital of France?") \ + .ready() + +print(response) +``` + +### Chat with tools +```python +from ploppie import Chat + +chat = Chat() + +@chat.tool("Perform mathematical calculations") +def calculate(expression: "str: The expression to calculate"): + return eval(expression) + +print(chat.send("What is 2502 * 2502, and 2858 - 28592? Please tell me the results.")) +``` + +### Chat with vision +```python +from ploppie import Chat +from ploppie.messages import Image + +chat = Chat() + +response = chat.system("You are a helpful assistant.") \ + .user(Image(file_handle=open("beautiful_landscape.png", "rb"))) \ + .ready() + +print(response) +``` \ No newline at end of file diff --git a/SCOPE.md b/SCOPE.md new file mode 100644 index 0000000..4162b40 --- /dev/null +++ b/SCOPE.md @@ -0,0 +1,83 @@ +Ploppie is a high-level, stupid-simple Pythonic LiteLLM abstraction layer for implementing simple chat workflows, with tools. No more messing around with dictionaries and JSON, no more OpenAI-specific APIs. Just plain Python. + +Supports vision, audio, and any other LiteLLM features. + +# Vanilla LiteLLM Example + +```python +from litellm import completion + +# Define a system prompt +system_prompt = "You are a helpful assistant that can perform calculations." 
+ +# Define a tool for basic math +def calculate(expression): + try: + return str(eval(expression)) + except: + return "Invalid expression" + +# Example conversation +messages = [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": "What is 25 * 4?"} +] + +# Get completion from LiteLLM +response = completion( + model="gpt-4o-mini", + messages=messages, + functions=[{ + "name": "calculate", + "description": "Perform basic mathematical calculations", + "parameters": { + "type": "object", + "properties": { + "expression": { + "type": "string", + "description": "The mathematical expression to evaluate" + } + }, + "required": ["expression"] + } + }] +) + +# Print the response +print(response.choices[0].message) +``` + +# Ploppie Example +```python +from ploppie import Chat, Tool, System + +chat = Chat(model="gpt-4o-mini") + +@chat.tool(description="Perform basic mathematical calculations.") +def calculate( + expression: "str: The result of the calculation as a string, or 'Invalid expression' if evaluation fails") -> str: + try: + return str(eval(expression)) + except: + return "Invalid expression" + +chat.system("You are a helpful assistant that can perform calculations.")\ + .user("What is 25 * 4?")\ + .ready() + +print(_) +``` + +# Vision Example + +```python +from ploppie import Chat, System, User, Assistant + +chat = Chat(model="gpt-4o-mini") + +a = chat.system("Identify the objects in the image.") \ + .user(Image(content=open("path/to/image.jpg", "rb"))) \ + .ready() # Signals chat is ready for interaction + +print(a) +``` diff --git a/TODO.md b/TODO.md new file mode 100644 index 0000000..574911a --- /dev/null +++ b/TODO.md @@ -0,0 +1,3 @@ +- [ ] Streaming support +- [ ] JSON mode +- [x] Image and file support diff --git a/examples/chat_test.py b/examples/chat_test.py new file mode 100644 index 0000000..7a0dbbb --- /dev/null +++ b/examples/chat_test.py @@ -0,0 +1,47 @@ +from ploppie import Chat +from ploppie.messages import System +from 
ploppie.messages.files import Image + +import os +import re +from datetime import datetime + +if __name__ == "__main__": + chat = Chat(model="gpt-4o-mini") + + @chat.dynamic() + def dynamic_message(): + return System("The current time is " + datetime.now().strftime("%H:%M:%S")) + + @chat.tool("Perform mathematical calculations") + def calculate(expression: "str: The expression to calculate"): + print(f"Calculating {expression}") + + try: + return eval(expression) + except Exception as e: + return f"I'm sorry, I can't calculate that. ({e})" + + while True: + input_ = input(" ") + + # Parse the input for a file path + + inputs = [input_] + + file_path_match = re.search(r'(?:^|\s)([\'"]?)([a-zA-Z0-9_\-./\\]+\.(png|jpg|jpeg|gif|webp))\1(?:\s|$)', input_) + if file_path_match: + file_path = file_path_match.group(2) + + if os.path.exists(file_path): + print(f"* Found image: {file_path}") + image = Image(open(file_path, 'rb')) + inputs.append(image) + else: + chat.system(f"File not found: {file_path} - please inform the user") + print(f"* File not found: {file_path}") + + responses = chat.send(inputs) + + for response in responses: + print(f" {response}") \ No newline at end of file diff --git a/examples/json_test.py b/examples/json_test.py new file mode 100644 index 0000000..499e5c3 --- /dev/null +++ b/examples/json_test.py @@ -0,0 +1,12 @@ +from ploppie import Chat +import random + +if __name__ == "__main__": + # You can pass any standard LiteLLM parameters to the Chat object + chat = Chat(model="gpt-4o-mini", response_format={"type": "json_object"}) + + response = chat.system("Take any input and convert it to a sensible JSON object.") \ + .user("Make a JSON object that represents a cat.") \ + .ready() + + print(response) \ No newline at end of file diff --git a/examples/save_test.py b/examples/save_test.py new file mode 100644 index 0000000..315b95e --- /dev/null +++ b/examples/save_test.py @@ -0,0 +1,24 @@ +import json +import os +from ploppie import Chat + +if 
__name__ == "__main__": + chat = Chat(model="gpt-4o-mini") + + @chat.tool("Perform mathematical calculations") + def calculate(expression: "str: The expression to calculate"): + return eval(expression) + + if os.path.exists("chat.json"): + with open("chat.json", "r") as f: + chat.from_dict(json.load(f)) + + while True: + input_ = input(" ") + responses = chat.send(input_) + + for response in responses: + print(f" {response}") + + with open("chat.json", "w") as f: + json.dump(chat.to_dict(), f) \ No newline at end of file diff --git a/examples/stream_test.py b/examples/stream_test.py new file mode 100644 index 0000000..f8c39e4 --- /dev/null +++ b/examples/stream_test.py @@ -0,0 +1,42 @@ +from ploppie import Chat + +# from litellm import completion +# import sys + +# response = completion( +# model="gpt-4o-mini", +# messages=[ +# {"role": "system", "content": "You're a helpful assistant."}, +# {"role": "user", "content": "What is the capital of France?"} +# ], +# stream=True +# ) + +# chunks = [] + +# for chunk in response: +# chunks.append(chunk) +# print(chunk.json()) + +# # help(chunks[0]) + +# sys.exit() + +if __name__ == "__main__": + + # You can pass any standard LiteLLM parameters to the Chat object + chat = Chat(model="gpt-4o-mini", stream=False) + + @chat.tool("Perform mathematical calculations") + def calculate(expression: "str: The expression to calculate"): + return eval(expression) + + response = chat.system("You're a helpful assistant.") \ + .user("What is the capital of France? 
Afterrwards, calculate 50 * 50.") \ + .ready() + + print("Response: ", end="", flush=True) + for chunk in response: + print(chunk, end="", flush=True) + + print("") \ No newline at end of file diff --git a/examples/test.png b/examples/test.png new file mode 100644 index 0000000..2d65c9d Binary files /dev/null and b/examples/test.png differ diff --git a/examples/tool_test.py b/examples/tool_test.py new file mode 100644 index 0000000..1879690 --- /dev/null +++ b/examples/tool_test.py @@ -0,0 +1,15 @@ +from ploppie import Chat +import random + +if __name__ == "__main__": + chat = Chat(model="gpt-4o-mini") + + @chat.tool("Perform mathematical calculations") + def calculate(expression: "str: The expression to calculate"): + return eval(expression) + + @chat.tool("Random number generator") + def random_number(min: "int: The minimum value", max: "int: The maximum value"): + return random.randint(min, max) + + print(chat.send("What is 2502 * 2502, and 2858 - 28592? Please tell me the result. And then throw me a random number for giggles.")) \ No newline at end of file diff --git a/examples/utility_test.py b/examples/utility_test.py new file mode 100644 index 0000000..307bab4 --- /dev/null +++ b/examples/utility_test.py @@ -0,0 +1,31 @@ +from ploppie import Utility +import random + +if __name__ == "__main__": + utility = Utility(model="gpt-4o-mini") + + # Generate a random pretend error message + error_messages = [ + "The database connection failed. Please check your credentials and try again.", + "The authentication credentials are invalid. Please check your credentials and try again.", + "The network connection timed out. Please check your network connection and try again.", + "You do not have permission to access this resource. Please check your permissions and try again." 
+ ] + + error_message = random.choice(error_messages) + + print(f"Error message: {error_message}") + + # Diagnose the root cause of the pretend error + result = utility.selector( + f"Based on the error message, what is the most likely root cause? Error message: {error_message}", + options=[ + "DATABASE_CONNECTION_ERROR", + "INVALID_AUTHENTICATION_CREDENTIALS", + "NETWORK_TIMEOUT", + "INSUFFICIENT_PERMISSIONS" + ] + ) + + # Print the diagnosed root cause + print(f"Diagnosed root cause: {result}") diff --git a/examples/vision_test.py b/examples/vision_test.py new file mode 100644 index 0000000..64e9acc --- /dev/null +++ b/examples/vision_test.py @@ -0,0 +1,12 @@ +from ploppie import Chat + +from ploppie.messages.files import Image + +if __name__ == "__main__": + chat = Chat(model="gpt-4o-mini") + + a = chat.system("Identify the objects in the image.") \ + .user(Image(file_handle=open("examples/test.png", "rb"))) \ + .ready() + + print(a) \ No newline at end of file diff --git a/ploppie/__init__.py b/ploppie/__init__.py new file mode 100644 index 0000000..788f1c1 --- /dev/null +++ b/ploppie/__init__.py @@ -0,0 +1,5 @@ +from .chat import Chat +from .utility import Utility +from .messages import System, User, Assistant, ToolCall, ToolResult + +__all__ = ["Chat", "Utility", "System", "User", "Assistant", "ToolCall", "ToolResult"] \ No newline at end of file diff --git a/ploppie/chat.py b/ploppie/chat.py new file mode 100644 index 0000000..124ea31 --- /dev/null +++ b/ploppie/chat.py @@ -0,0 +1,315 @@ +from litellm import completion +import logging +import traceback + +from .messages import System, User, Assistant, ToolCall, ToolResult, Dynamic, from_dict, to_dict + +class Chat: + def __init__(self, **kwargs): + self.kwargs = kwargs + self.messages = [] + self.tools = {} + + # Set up logging + self.logger = logging.getLogger(__name__) + self.logger.setLevel(logging.INFO) + + # Create console handler if no handlers exist + if not self.logger.handlers: + console_handler = 
logging.StreamHandler() + console_handler.setLevel(logging.INFO) + formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + console_handler.setFormatter(formatter) + self.logger.addHandler(console_handler) + + def __str__(self): + return f"" + + def to_dict(self): + return [to_dict(m) for m in self.messages] + + def from_dict(self, dict): + # Clear existing messages + self.messages = [] + + # Convert each dict to a Message, skipping any ToolCall or ToolResult messages + for m in dict: + if m["type"] == "ToolResult": + continue + + if m["type"] == "Assistant": + m["data"]["tool_calls"] = [] + + self.messages.append(from_dict(m)) + + def dynamic(self): + """ + Decorator for adding a dynamic message to the chat. + The decorated function will be called whenever the message needs to be retrieved. + The function should return a Message object. + """ + def decorator(func): + # Find an existing Dynamic message that has no callback and assign the function to it + # Useful when restoring a chat from a database where the callback cannot be serialized, + # and we want to re-assign the dynamic message at the same position in the chat + for message in self.messages: + if isinstance(message, Dynamic): + if message._callback == None: + message._callback = func + return func + + # Create a Dynamic message that will call the function when needed + message = Dynamic(func) + + # Add the message to the chat + self.messages.append(message) + return func + + return decorator + + def tool(self, description: str): + """ + Decorator for adding a tool to the chat + """ + def decorator(func): + self.tools[func.__name__] = { + "description": description, + "parameters": func.__annotations__, + "function": func + } + return func + return decorator + + @property + def tools_to_dict(self): + """ + Converts the tools to the OpenAI JSON schema format + """ + tools_dict = [] + for name, tool in self.tools.items(): + tool_dict = { + "type": "function", + "function": { + 
"name": name, + "description": tool["description"], + "parameters": { + "type": "object", + "properties": {}, + "required": [] + } + } + } + + # Add each parameter as a property + for param in tool["parameters"]: + if param == "return": continue + + # Get the function annotation for this parameter + annotation = tool["parameters"][param] + # Split the annotation string on ": " to get type and description + if isinstance(annotation, str) and ": " in annotation: + param_type, param_desc = annotation.split(": ", 1) + + # Map param_type to JSON Schema type + param_type = { + "int": "number", + "str": "string", + "bool": "boolean", + "float": "number" + }.get(param_type.lower(), "string") + + tool_dict["function"]["parameters"]["properties"][param] = { + "type": param_type.lower(), + "description": param_desc + } + tool_dict["function"]["parameters"]["required"].append(param) + continue + + # If no annotation or not in string format, guess string + tool_dict["function"]["parameters"]["properties"][param] = { + "type": "string", + "description": annotation + } + tool_dict["function"]["parameters"]["required"].append(param) + + tools_dict.append(tool_dict) + + return tools_dict + + @property + def messages_to_dict(self): + """ + Converts the messages to the OpenAI message format + """ + + # Filter out any assistant messages with tool calls that don't have corresponding tool results + filtered_messages = [] + i = 0 + + while i < len(self.messages): + message = self.messages[i] + + # If this is an assistant message with tool calls + if isinstance(message, Assistant) and message.data.get("tool_calls"): + # Look ahead for tool results for each tool call + tool_calls = message.data["tool_calls"] + all_tools_have_results = True + + # Check if all tool calls have corresponding results + for tool_call in tool_calls: + found_result = False + for j in range(i + 1, len(self.messages)): + if isinstance(self.messages[j], ToolResult): + if self.messages[j].data.get("tool_call_id") == 
tool_call.id: + found_result = True + break + if not found_result: + all_tools_have_results = False + break + + # Only keep the message if all its tool calls have results + if all_tools_have_results: + filtered_messages.append(message) + else: + filtered_messages.append(message) + + i += 1 + + self.messages = filtered_messages + return [m.to_json() for m in self.messages if m.to_json() is not None] + + def system(self, message: str): + """ + Adds a system message to the chat + """ + self.messages.append(System(content=message)) + return self + + def user(self, message: str): + """ + Adds a user message to the chat + """ + self.messages.append(User(content=message)) + return self + + def assistant(self, message: str): + """ + Adds an assistant message to the chat + """ + self.messages.append(Assistant(content=message)) + return self + + def append(self, message): + """ + Appends a message to the chat + """ + if isinstance(message, list): + self.messages.extend(message) + else: + self.messages.append(message) + return self + + def send(self, message: str): + """ + Sends a user message to the LLM and automatically handles the response + """ + self.messages.append(User(content=message)) + return self.ready() + + def call_tool(self, tool_call: ToolCall): + """ + Calls a tool with the given ToolCall object, used internally by the ready() method + """ + tool_result = self.tools[tool_call.name]["function"](**tool_call.arguments) + + self.logger.debug(f"Tool call {tool_call.name} returned") + + self.messages.append(ToolResult( + content=str(tool_result), + name=tool_call.name, + tool_call_id=tool_call.id + )) + + @property + def stream(self): + """ + Whether to stream the response from the LLM + """ + return self.kwargs.get("stream", False) + + def parse_chunk(self, chunk): + """ + Parses a chunk from the LLM + """ + pass + + def ready(self): + """ + Sends the messages to the LLM and handles the response + """ + responses = [] + iteration = 0 # Used for debugging + + while 
True: + iteration += 1 + self.logger.debug(f"This is iteration #{iteration}") + + try: + response = completion( + messages=self.messages_to_dict, + tools=self.tools_to_dict if self.tools else None, + **self.kwargs + ) + except Exception as e: + self.logger.error(f"Fatal error in completion: {e}") + self.logger.error(traceback.format_exc()) + raise e + + if self.stream: + for chunk in response: + self.parse_chunk(chunk) + + else: + response = response.json() + message = response["choices"][0]["message"] + + tool_calls = [] + if "tool_calls" in message: + if isinstance(message["tool_calls"], list): + # Convert the tool calls to our ToolCall objects + tool_calls = [ + ToolCall( + name=t["function"]["name"], + arguments=t["function"]["arguments"], + id=t["id"] + ) for t in message["tool_calls"] + ] + + # Get the content of the message - some APIs return null, yet + # still require an empty string, so we check for that + content = message["content"] or "" + + self.messages.append(Assistant( + content=content, + tool_calls=tool_calls + )) + + # If there is content, add it to the responses + if content: + responses.append(content) + + # If there are tool calls, execute them + if tool_calls: + self.logger.debug(f"There are {len(tool_calls)} tool calls") + # Iterate through each tool call and execute it + for tool_call in tool_calls: + self.logger.debug(f"Executing tool call {tool_call.name}") + + # Execute the tool call + self.call_tool(tool_call) + + else: + # If there are no tool calls, return the responses + self.logger.debug("There are no tool calls on this iteration") + self.logger.debug(f"Returning {len(responses)} responses") + return responses diff --git a/ploppie/messages/__init__.py b/ploppie/messages/__init__.py new file mode 100644 index 0000000..aceb4f8 --- /dev/null +++ b/ploppie/messages/__init__.py @@ -0,0 +1,50 @@ +from .message import Message +from .system import System +from .user import User +from .assistant import Assistant +from .toolcall import 
ToolCall +from .toolresult import ToolResult +from .dynamic import Dynamic + +import inspect + +__all__ = ["Message", "System", "User", "Assistant", "ToolCall", "ToolResult", "Dynamic"] + +def to_dict(message): + """ Convert a Message to a dictionary format, for database storage """ + def encode_value(value): + if isinstance(value, dict): + if "..serialized.." in value: + return value["..serialized.."] + else: + return {k: encode_value(v) for k, v in value.items()} + elif isinstance(value, list): + return [encode_value(v) for v in value] + elif hasattr(value, 'to_dict'): + return encode_value(value.to_dict()) + + return value + + return encode_value(message.to_dict()) + +def from_dict(dict): + """ Convert a dictionary back to a Message saved from to_dict() """ + type = dict["type"] + data = dict["data"] + + # Get the class from the type + message_class = globals()[type] + + # Create a new instance with None for all required args + sig = inspect.signature(message_class.__init__) + args = { + param.name: None + for param in sig.parameters.values() + if param.name != 'self' + } + message = message_class(**args) + + # Set the data directly + message.data = data + + return message \ No newline at end of file diff --git a/ploppie/messages/assistant.py b/ploppie/messages/assistant.py new file mode 100644 index 0000000..8f42716 --- /dev/null +++ b/ploppie/messages/assistant.py @@ -0,0 +1,27 @@ +from .message import Message +from .toolcall import ToolCall + +class Assistant(Message): + def __init__(self, content: str, tool_calls=[], tool_result=None): + super().__init__("assistant", content) + + self.data["tool_calls"] = tool_calls + self.data["tool_result"] = tool_result + + def to_json(self): + a = super().to_json() + + if self.data["tool_calls"]: + # Convert to OpenAI tool call format + # Convert any dict tool calls to ToolCall objects + if any(isinstance(t, dict) for t in self.data["tool_calls"]): + self.data["tool_calls"] = [ + t if not isinstance(t, dict) else 
ToolCall.from_dict(t) + for t in self.data["tool_calls"] + ] + a["tool_calls"] = [t.to_json() for t in self.data["tool_calls"]] + + if self.data["tool_result"]: + a["tool_result"] = self.data["tool_result"] + + return a \ No newline at end of file diff --git a/ploppie/messages/dynamic.py b/ploppie/messages/dynamic.py new file mode 100644 index 0000000..77a8975 --- /dev/null +++ b/ploppie/messages/dynamic.py @@ -0,0 +1,35 @@ +class Dynamic: + def __init__(self, callback): + self._callback = callback + + def __call__(self): + if not self._callback: + raise NotImplementedError("Dynamic message has no callback") + + return self._callback() + + def __str__(self): + return str(self.__call__()) + + @property + def content(self): + return self.__call__().content + + @property + def role(self): + return self.__call__().role + + def to_dict(self): + return { + "type": "Dynamic", + "data": { + "callback": None # Can't serialize a function + } + } + + def to_json(self): + """ Convert to OpenAI message format """ + try: + return self.__call__().to_json() + except Exception as e: + return None diff --git a/ploppie/messages/files/__init__.py b/ploppie/messages/files/__init__.py new file mode 100644 index 0000000..2a737b9 --- /dev/null +++ b/ploppie/messages/files/__init__.py @@ -0,0 +1,4 @@ +from .image import Image +from .audio import Audio + +__all__ = ["Image", "Audio"] \ No newline at end of file diff --git a/ploppie/messages/files/audio.py b/ploppie/messages/files/audio.py new file mode 100644 index 0000000..736f27a --- /dev/null +++ b/ploppie/messages/files/audio.py @@ -0,0 +1,49 @@ +import base64 + +class Audio: + def __init__(self, file_handle): + self.file_handle = file_handle + + def __str__(self): + return f"" + + @property + def format(self): + # Read the first few bytes to identify the audio format + current_pos = self.file_handle.tell() + header = self.file_handle.read(12) + self.file_handle.seek(current_pos) + + # RIFF header for WAV + if header.startswith(b'RIFF') 
and header[8:12] == b'WAVE': + return "wav" + # MP3 header + elif header.startswith(b'ID3') or (header[0:2] == b'\xFF\xFB'): + return "mp3" + # OGG header + elif header.startswith(b'OggS'): + return "ogg" + # FLAC header + elif header.startswith(b'fLaC'): + return "flac" + + # Default to wav if unknown + return "wav" + + @property + def read(self): + self.file_handle.seek(0) + return self.file_handle.read() + + @property + def base64(self): + return base64.b64encode(self.read).decode() + + def to_json(self): + return { + "type": "input_audio", + "input_audio": { + "data": self.base64, + "format": self.format + } + } \ No newline at end of file diff --git a/ploppie/messages/files/document.py b/ploppie/messages/files/document.py new file mode 100644 index 0000000..d04f1bb --- /dev/null +++ b/ploppie/messages/files/document.py @@ -0,0 +1,20 @@ +class Document: + def __init__(self, file_handle): + self.file_handle = file_handle + + def __str__(self): + return f"" + + @property + def read(self): + self.file_handle.seek(0) + return self.file_handle.read() + + def to_json(self): + return { + "type": "input_document", + "input_document": { + "data": self.read, + "format": self.format + } + } \ No newline at end of file diff --git a/ploppie/messages/files/image.py b/ploppie/messages/files/image.py new file mode 100644 index 0000000..f4e5eb2 --- /dev/null +++ b/ploppie/messages/files/image.py @@ -0,0 +1,91 @@ +from PIL import Image as PILImage + +import io +import base64 +import imghdr + +class Image: + def __init__(self, file_handle): + self.file_handle = file_handle + self.converted_image = None + + def __str__(self): + return f"" + + def read_file(self): + self.file_handle.seek(0) + return self.file_handle.read() + + def convert_image(self, image_format: str, quality: int = 90, width: int = 600, height: int = 600): + + # Read the image data + image_data = self.read_file() + + # Open the image using PIL + with PILImage.open(io.BytesIO(image_data)) as img: + # Resize the 
image while maintaining aspect ratio + img.thumbnail((width, height)) + + # Create a new BytesIO object to store the converted image + output = io.BytesIO() + + # Convert and save the image to the specified format + img.save(output, format=image_format.upper(), quality=quality) + + # Reset the BytesIO object to the beginning + output.seek(0) + + # Update the file_handle with the new image data + return output + + def convert_to_base64(self, image_data): + # Encode the image data to base64 + image_data.seek(0) + return base64.b64encode(image_data.read()).decode("utf-8") + + @property + def safe_image(self): + # Return cached version if it exists + if self.converted_image: + self.converted_image[1].seek(0) + return self.converted_image + + # Read image data + image_data = self.read_file() + + # Retrieve image format through parsing the file data + image_format = imghdr.what(None, image_data) + + # Check file size (2MB = 2 * 1024 * 1024 bytes) + if len(image_data) > 2 * 1024 * 1024: + needs_conversion = True + else: + # Check dimensions + with PILImage.open(io.BytesIO(image_data)) as img: + width, height = img.size + needs_conversion = width > 600 or height > 600 + + if needs_conversion: + # Create converted version + image_data = self.convert_image("jpeg", quality=90, width=600, height=600) + + # Cache the converted version + self.converted_image = ("jpeg", image_data) + return self.converted_image + + # Image is safe to use as-is + return (image_format, image_data) + + @property + def data_url(self): + image_format, image_data = self.safe_image + return f"data:image/{image_format};base64,{self.convert_to_base64(image_data)}" + + def to_json(self): + # Convert to OpenAI image format + return { + "type": "image_url", + "image_url": { + "url": self.data_url + } + } \ No newline at end of file diff --git a/ploppie/messages/message.py b/ploppie/messages/message.py new file mode 100644 index 0000000..afd0755 --- /dev/null +++ b/ploppie/messages/message.py @@ -0,0 +1,51 @@ 
+from .files import Image, Audio + +class Message: + def __init__(self, role: str, content): + self.data = { + "role": role, + "content": content + } + + def __str__(self): + return f"" + + @property + def role(self): + return self.data["role"] + + @property + def content(self): + content = [] + + if not isinstance(self.data["content"], list): + self.data["content"] = [self.data["content"]] + + for item in self.data["content"]: + if isinstance(item, str): + content.append(item) + elif type(item) in [Image, Audio]: + content.append(item.to_json()) + else: + raise ValueError(f"Unsupported content type: {type(item)}") + + # If there is only one item, return it directly + # OpenAI expects a single item, not a list of one item + if len(content) == 1: + if isinstance(content[0], str): + return content[0] + + # If it's a file, though, we'll return the single item list + + return content + + def to_dict(self): + """ Convert to a dictionary format, for database storage """ + return { + "type": self.__class__.__name__, + "data": self.data + } + + def to_json(self): + """ Convert to OpenAI message format """ + return {"role": self.role, "content": self.content} \ No newline at end of file diff --git a/ploppie/messages/system.py b/ploppie/messages/system.py new file mode 100644 index 0000000..f60679f --- /dev/null +++ b/ploppie/messages/system.py @@ -0,0 +1,5 @@ +from .message import Message + +class System(Message): + def __init__(self, content: str): + super().__init__("system", content) \ No newline at end of file diff --git a/ploppie/messages/toolcall.py b/ploppie/messages/toolcall.py new file mode 100644 index 0000000..518f509 --- /dev/null +++ b/ploppie/messages/toolcall.py @@ -0,0 +1,48 @@ +import json + +class ToolCall: + def __init__(self, name: str, arguments: dict, id: str): + self.data = { + "name": name, + "arguments": arguments, + "id": id + } + + def __str__(self): + return f"" + + @property + def name(self): + return self.data["name"] + + @property + def 
class ToolResult(Message):
    """ Used to return the result of a tool call to the assistant """

    def __init__(self, content: str, name: str, tool_call_id: str):
        """
        :param content: Output produced by the tool, sent back to the model
        :param name: Name of the tool that was called
        :param tool_call_id: ID linking this result to its originating ToolCall
        """
        super().__init__("tool", content)
        self.data["name"] = name
        self.data["tool_call_id"] = tool_call_id

    def __str__(self):
        # Fix: previously returned an empty f-string, which made any logging
        # or debugging output useless
        return f"<ToolResult name={self.name!r} tool_call_id={self.tool_call_id!r}>"

    @property
    def name(self):
        return self.data["name"]

    @property
    def tool_call_id(self):
        return self.data["tool_call_id"]

    def to_json(self):
        # Convert to OpenAI tool result format: the base message dict plus
        # the tool metadata the API requires to pair result with call
        result = super().to_json()
        result["name"] = self.name
        result["tool_call_id"] = self.tool_call_id
        return result
+ def chat(self): + return Chat(**self.kwargs) + + def selector(self, message: str, options: list, attempts: int = 3): + """ + Prompts the LLM to select one option from a list of choices. + + :param message: The prompt or question to ask the LLM + :type message: str + :param options: List of valid options the LLM can choose from + :type options: list + :param attempts: Number of attempts before raising error, defaults to 3 + :type attempts: int, optional + + :returns: The selected option that matches one from the options list + :rtype: str + + :raises ValueError: If no valid selection is made within the allowed attempts + """ + chat = self.chat + attempt = 0 + + while attempt < attempts: + # Add system message explaining the constraints + chat.system(f"You must respond with exactly one of these options: {', '.join(options)}") + chat.system(message) + + # Get response from LLM + responses = chat.ready() + response = responses[0] if isinstance(responses, list) else responses + + # Check if response matches any option + for option in options: + if option.lower() == response.lower().strip(): + return option + + attempt += 1 + + # Add error message for invalid response + chat.system(f"Invalid selection. 
Please choose exactly one option from: {', '.join(options)}") + + raise ValueError(f"Failed to get valid selection after {attempts} attempts") diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..46f5f79 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +litellm +pillow diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..a251f5b --- /dev/null +++ b/setup.py @@ -0,0 +1,23 @@ +from setuptools import setup, find_packages + +setup( + name="ploppie", + version="0.3.0", + packages=find_packages(), + install_requires=[ + "litellm", + "pillow", + ], + author="Ben Baptist", + author_email="benbaptist.com", + description="A high-level, stupid-simple Pythonic LiteLLM abstraction layer for implementing simple chat workflows, with tools.", + long_description=open("README.md").read(), + long_description_content_type="text/markdown", + url="", + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + ], + python_requires=">=3.7", +)