Skip to content

Commit

Permalink
add SpiceClient
Browse files — browse the repository at this point in the history
  • Loading branch information
biobootloader committed Mar 8, 2024
1 parent a2115ce commit 571e430
Show file tree
Hide file tree
Showing 2 changed files with 64 additions and 51 deletions.
11 changes: 8 additions & 3 deletions run.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,19 @@
from spice import SpiceClient

# Model selection: the client infers the provider (OpenAI vs Anthropic)
# from the model name, so switching providers is a one-line change.
# model = "gpt-4-0125-preview"
model = "claude-3-opus-20240229"

system_message = "You are a helpful assistant."
messages = [
    {"role": "user", "content": "list 5 random words"},
]

client = SpiceClient(model=model)

# Streamed call: print each text delta as it arrives.
for t in client.call_llm(system_message, messages, stream=True):
    print(t, end="")

print("\n####################\n")

# Non-streamed call: the full response comes back as a single string.
response = client.call_llm(system_message, messages, stream=False)
print(response)
104 changes: 56 additions & 48 deletions spice.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,58 +4,66 @@
from dotenv import load_dotenv
from openai import OpenAI

class SpiceClient:
    """Uniform chat-completion client for OpenAI and Anthropic models.

    The provider is chosen from the model name at construction time, so
    callers interact with a single ``call_llm`` API regardless of backend.
    """

    def __init__(self, model):
        """Create a client for *model*.

        Args:
            model: model identifier string; must be one of the known models.

        Raises:
            ValueError: if the model name is not recognized.
        """
        if model == "gpt-4-0125-preview":
            self._provider = "openai"
        elif model == "claude-3-opus-20240229":
            self._provider = "anthropic"
        else:
            raise ValueError(f"Unknown model {model}")

        self.model = model

        # Pull API keys from a local .env file before building the SDK client.
        load_dotenv()
        if self._provider == "openai":
            self._client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        elif self._provider == "anthropic":
            self._client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))

    def call_llm(self, system_message, messages, stream=False):
        """Run one chat completion against the configured provider.

        Args:
            system_message: system prompt text.
            messages: list of {"role", "content"} dicts (user/assistant turns).
            stream: when True, return a generator yielding text deltas;
                otherwise return the complete response text as a string.
        """
        if self._provider == "anthropic":
            # Anthropic takes the system prompt as a dedicated argument.
            chat_completion_or_stream = self._client.messages.create(
                max_tokens=1024,
                system=system_message,
                messages=messages,
                model=self.model,
                temperature=0.3,
                stream=stream,
            )
        else:
            # OpenAI expects the system prompt as the first chat message.
            _messages = [
                {
                    "role": "system",
                    "content": system_message,
                }
            ] + messages
            chat_completion_or_stream = self._client.chat.completions.create(
                messages=_messages,
                model=self.model,
                temperature=0.3,
                stream=stream,
            )

        if stream:
            return self._stream_generator(chat_completion_or_stream)
        else:
            # The two SDKs shape their responses differently.
            if self._provider == "anthropic":
                response = chat_completion_or_stream.content[0].text
            else:
                response = chat_completion_or_stream.choices[0].message.content
            return response

    def _stream_generator(self, stream):
        """Yield text deltas from a provider streaming response.

        Anthropic events that are not content deltas yield an empty string;
        OpenAI chunks with a ``None`` delta are skipped.
        """
        for chunk in stream:
            if self._provider == "anthropic":
                content = ""
                if chunk.type == "content_block_delta":
                    content = chunk.delta.text
            else:
                content = chunk.choices[0].delta.content
            if content is not None:
                yield content

0 comments on commit 571e430

Please sign in to comment.