Skip to content

Commit

Permalink
Anthropic v0.7 (#437)
Browse files Browse the repository at this point in the history
* upgrading anthropic client to 0.7

* fixing mypy for anthropic client update
Loading branch information
skirdey authored Nov 30, 2023
1 parent 09aed42 commit e6d6d73
Show file tree
Hide file tree
Showing 4 changed files with 54 additions and 41 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,4 @@ credentials.json
benchmark_results/
private.key
dump.rdb
.idea
56 changes: 35 additions & 21 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ six = "^1.16.0"
opentelemetry-sdk = "^1.17.0"
janus = "^1.0.0"
scipy = "^1.10.1"
anthropic = "^0.2.9"
anthropic = "^0.7.1"

elevenlabs = {version = "^0.2.6", optional = true}
google-cloud-texttospeech = {version = "^2.14.1", optional = true}
Expand Down
36 changes: 17 additions & 19 deletions vocode/streaming/agent/anthropic_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def __init__(

# streaming not well supported by langchain, so we will connect directly
self.anthropic_client = (
anthropic.Client(api_key=anthropic_api_key)
anthropic.AsyncAnthropic(api_key=anthropic_api_key)
if agent_config.generate_responses
else None
)
Expand Down Expand Up @@ -98,25 +98,23 @@ async def generate_response(
self.memory.chat_memory.messages.append(bot_memory_message)
prompt = self.llm._convert_messages_to_prompt(self.memory.chat_memory.messages)

streamed_response = await self.anthropic_client.acompletion_stream(
prompt=prompt,
max_tokens_to_sample=self.agent_config.max_tokens_to_sample,
model=self.agent_config.model_name,
)

buffer = ""
async for message in streamed_response:
completion = message["completion"]
delta = completion[len(bot_memory_message.content + buffer) :]
buffer += delta

sentence, remainder = get_sentence_from_buffer(buffer)
if self.anthropic_client:
streamed_response = await self.anthropic_client.completions.create(
prompt=prompt,
max_tokens_to_sample=self.agent_config.max_tokens_to_sample,
model=self.agent_config.model_name,
stream=True,
)

if sentence:
bot_memory_message.content = bot_memory_message.content + sentence
buffer = remainder
yield sentence, True
continue
buffer = ""
async for completion in streamed_response:
buffer += completion.completion
sentence, remainder = get_sentence_from_buffer(buffer)
if sentence:
bot_memory_message.content = bot_memory_message.content + sentence
buffer = remainder
yield sentence, True
continue

def update_last_bot_message_on_cut_off(self, message: str):
for memory_message in self.memory.chat_memory.messages[::-1]:
Expand Down

0 comments on commit e6d6d73

Please sign in to comment.