add SpiceClient #2

Merged · 3 commits · Mar 8, 2024

Changes from 1 commit
11 changes: 8 additions & 3 deletions run.py
@@ -1,14 +1,19 @@
-from spice import call_llm
+from spice import SpiceClient
 
 # model = "gpt-4-0125-preview"
 model = "claude-3-opus-20240229"
 
 system_message = "You are a helpful assistant."
 messages = [
     {"role": "user", "content": "list 5 random words"},
 ]
 
-for t in call_llm(system_message, messages, stream=True):
+client = SpiceClient(model=model)
+
+for t in client.call_llm(system_message, messages, stream=True):
     print(t, end="")
 
 print("\n####################\n")
 
-response = call_llm(system_message, messages, stream=False)
+response = client.call_llm(system_message, messages, stream=False)
 print(response)
104 changes: 56 additions & 48 deletions spice.py
@@ -4,58 +4,66 @@
 from dotenv import load_dotenv
 from openai import OpenAI
 
-load_dotenv()
-_openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
-_anthropic_client = Anthropic(
-    api_key=os.environ.get("ANTHROPIC_API_KEY"),
-)
-_openai_model = "gpt-4-0125-preview"
-_anthropic_model = "claude-3-opus-20240229"
-
-_use_anthropic = True
-
-
-def call_llm(system_message, messages, stream=False):
-    if _use_anthropic:
-        chat_completion_or_stream = _anthropic_client.messages.create(
-            max_tokens=1024,
-            system=system_message,
-            messages=messages,
-            model=_anthropic_model,
-            temperature=0.3,
-            stream=stream,
-        )
-    else:
-        _messages = [
-            {
-                "role": "system",
-                "content": system_message,
-            }
-        ] + messages
-        chat_completion_or_stream = _openai_client.chat.completions.create(
-            messages=_messages,
-            model=_openai_model,
-            temperature=0.3,
-            stream=stream,
-        )
-
-    if stream:
-        return _stream_generator(chat_completion_or_stream)
-    else:
-        if _use_anthropic:
-            response = chat_completion_or_stream.content[0].text
-        else:
-            response = chat_completion_or_stream.choices[0].message.content
-        return response
+class SpiceClient:
+    def __init__(self, model):
+        if model == "gpt-4-0125-preview":
+            self._provider = "openai"
+        elif model == "claude-3-opus-20240229":
+            self._provider = "anthropic"
+        else:
+            raise ValueError(f"Unknown model {model}")

Contributor:
Consider adding support for more models or a default model to avoid raising an exception for unknown models. This would make the client more flexible.
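
One way to act on this, as a hypothetical sketch rather than anything in this PR, would be to route on the model-name prefix instead of matching exact model strings:

    # Hypothetical helper (not in this diff): new gpt-*/claude-* models
    # would then work without editing an exact-match whitelist.
    def _infer_provider(model: str) -> str:
        if model.startswith("gpt-"):
            return "openai"
        if model.startswith("claude-"):
            return "anthropic"
        raise ValueError(f"Unknown model {model}")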


+
+        self.model = model
+
+        load_dotenv()

Contributor:
It's a good practice to call load_dotenv() at the entry point of the application to ensure environment variables are loaded once and available throughout the application. Consider moving this call outside of the class.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

good call
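
A hypothetical sketch of that suggestion, assuming run.py stays the entry point: call load_dotenv() there once, before any client is constructed, and drop the call from SpiceClient.__init__:

    # Hypothetical entry-point version of run.py (not part of this diff).
    from dotenv import load_dotenv
    from spice import SpiceClient

    load_dotenv()  # populate os.environ once, up front
    client = SpiceClient(model="claude-3-opus-20240229")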

+
+        if self._provider == "openai":
+            self._client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+        elif self._provider == "anthropic":
+            self._client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
 
-def _stream_generator(stream):
-    for chunk in stream:
-        if _use_anthropic:
-            content = ""
-            if chunk.type == "content_block_delta":
-                content = chunk.delta.text
-        else:
-            content = chunk.choices[0].delta.content
-        if content is not None:
-            yield content
+    def call_llm(self, system_message, messages, stream=False):
+        if self._provider == "anthropic":
+            chat_completion_or_stream = self._client.messages.create(
+                max_tokens=1024,
+                system=system_message,
+                messages=messages,
+                model=self.model,
+                temperature=0.3,
+                stream=stream,
+            )
+        else:
+            _messages = [
+                {
+                    "role": "system",
+                    "content": system_message,
+                }
+            ] + messages
+            chat_completion_or_stream = self._client.chat.completions.create(
+                messages=_messages,
+                model=self.model,
+                temperature=0.3,
+                stream=stream,
+            )
+
+        if stream:
+            return self._stream_generator(chat_completion_or_stream)
+        else:
+            if self._provider == "anthropic":
+                response = chat_completion_or_stream.content[0].text
+            else:
+                response = chat_completion_or_stream.choices[0].message.content
+            return response
+
+    def _stream_generator(self, stream):
+        for chunk in stream:
+            if self._provider == "anthropic":
+                content = ""
+                if chunk.type == "content_block_delta":
+                    content = chunk.delta.text
+            else:
+                content = chunk.choices[0].delta.content
+            if content is not None:
+                yield content

Contributor:
The check for content is not None might be redundant since you're initializing content to an empty string and only changing it if certain conditions are met. Consider removing this check.
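
Worth noting: the empty-string initialization only happens on the Anthropic branch, and on the OpenAI branch chunk.choices[0].delta.content can itself be None (e.g. the final chunk of a stream), so some guard is still needed there. A hypothetical restructuring that drops the shared check but keeps that guard:

    # Hypothetical sketch: yield inside each branch, removing the ""
    # sentinel and the shared None check.
    def _stream_generator(self, stream):
        for chunk in stream:
            if self._provider == "anthropic":
                if chunk.type == "content_block_delta":
                    yield chunk.delta.text
            else:
                content = chunk.choices[0].delta.content
                if content is not None:  # OpenAI deltas may omit content
                    yield content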
