deprecate TypedModel and properly onboard pydantic v2 #630

Closed · wants to merge 8 commits
11 changes: 5 additions & 6 deletions apps/langchain_agent/telephony_app.py
@@ -6,7 +6,7 @@
from dotenv import load_dotenv
from fastapi import FastAPI

from vocode.streaming.models.events import Event, EventType
from vocode.streaming.models.events import Event
from vocode.streaming.models.transcript import TranscriptCompleteEvent
from vocode.streaming.telephony.config_manager.redis_config_manager import RedisConfigManager
from vocode.streaming.telephony.server.base import TelephonyServer
@@ -23,14 +23,13 @@

class EventsManager(events_manager.EventsManager):
def __init__(self):
super().__init__(subscriptions=[EventType.TRANSCRIPT_COMPLETE])
super().__init__(subscriptions=["transcript_complete"])

async def handle_event(self, event: Event):
if event.type == EventType.TRANSCRIPT_COMPLETE:
transcript_complete_event = typing.cast(TranscriptCompleteEvent, event)
if isinstance(event, TranscriptCompleteEvent):
add_transcript(
transcript_complete_event.conversation_id,
transcript_complete_event.transcript.to_string(),
event.conversation_id,
event.transcript.to_string(),
)


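The handler above shows the pattern this PR applies throughout: instead of matching on an `EventType` enum member and then `typing.cast`-ing, the code subscribes with a plain string and narrows with `isinstance`, which pydantic v2 subclasses support directly. A minimal, self-contained sketch of the idea (the simplified `Event`/`TranscriptCompleteEvent` fields below are stand-ins, not the vocode models):

```python
from typing import Literal

from pydantic import BaseModel


class Event(BaseModel):
    conversation_id: str


class TranscriptCompleteEvent(Event):
    type: Literal["transcript_complete"] = "transcript_complete"
    transcript: str


async def handle_event(event: Event) -> None:
    # isinstance narrows the type for both mypy and the runtime, so the
    # subclass-only fields can be read without a typing.cast.
    if isinstance(event, TranscriptCompleteEvent):
        print(event.conversation_id, event.transcript)
```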
8 changes: 5 additions & 3 deletions apps/telephony_app/speller_agent.py
@@ -1,14 +1,16 @@
from typing import Optional, Tuple
from typing import Literal, Optional, Tuple

from vocode.streaming.agent.abstract_factory import AbstractAgentFactory
from vocode.streaming.agent.base_agent import BaseAgent, RespondAgent
from vocode.streaming.agent.chat_gpt_agent import ChatGPTAgent
from vocode.streaming.models.agent import AgentConfig, AgentType, ChatGPTAgentConfig
from vocode.streaming.models.agent import AgentConfig, ChatGPTAgentConfig


class SpellerAgentConfig(AgentConfig, type="agent_speller"):
class SpellerAgentConfig(AgentConfig):
"""Configuration for SpellerAgent. Inherits from AgentConfig."""

type: Literal["agent_speller"] = "agent_speller"

pass


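The `type="agent_speller"` class keyword belonged to the `TypedModel` machinery this PR deprecates; with pydantic v2 the discriminator becomes an ordinary `Literal` field with a default. A minimal sketch of the pattern, using a simplified stand-in for `AgentConfig`:

```python
from typing import Literal

from pydantic import BaseModel


class AgentConfig(BaseModel):
    # Simplified stand-in for vocode's AgentConfig.
    generate_responses: bool = True


class SpellerAgentConfig(AgentConfig):
    # The discriminator is now a plain field: a Literal type plus a default
    # replaces the old `type="agent_speller"` class keyword.
    type: Literal["agent_speller"] = "agent_speller"


config = SpellerAgentConfig()
assert config.type == "agent_speller"
assert config.model_dump()["type"] == "agent_speller"
```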
25 changes: 13 additions & 12 deletions playground/streaming/agent/chat.py
@@ -3,7 +3,7 @@
import typing

from dotenv import load_dotenv
from pydantic.v1 import BaseModel
from pydantic import BaseModel

from vocode.streaming.action.abstract_factory import AbstractActionFactory
from vocode.streaming.action.base_action import BaseAction
@@ -28,8 +28,9 @@
from vocode.streaming.agent import ChatGPTAgent
from vocode.streaming.agent.base_agent import (
AgentResponse,
AgentResponseFillerAudio,
AgentResponseMessage,
AgentResponseType,
AgentResponseStop,
BaseAgent,
TranscriptionAgentInput,
)
@@ -39,7 +40,8 @@
BACKCHANNELS = ["Got it", "Sure", "Okay", "I understand"]


class ShoutActionConfig(ActionConfig, type="shout"): # type: ignore
class ShoutActionConfig(ActionConfig):
type: typing.Literal["shout"] = "shout"
num_exclamation_marks: int


@@ -114,16 +116,15 @@ async def receiver():
try:
event = await agent_response_queue.get()
response = event.payload
if response.type == AgentResponseType.FILLER_AUDIO:
if isinstance(response, AgentResponseFillerAudio):
print("Would have sent filler audio")
elif response.type == AgentResponseType.STOP:
elif isinstance(response, AgentResponseStop):
print("Agent returned stop")
ended = True
break
elif response.type == AgentResponseType.MESSAGE:
agent_response = typing.cast(AgentResponseMessage, response)
elif isinstance(response, AgentResponseMessage):

if isinstance(agent_response.message, EndOfTurn):
if isinstance(response.message, EndOfTurn):
ignore_until_end_of_turn = False
if random.random() < backchannel_probability:
backchannel = random.choice(BACKCHANNELS)
@@ -133,20 +134,20 @@
conversation_id,
is_backchannel=True,
)
elif isinstance(agent_response.message, BaseMessage):
elif isinstance(response.message, BaseMessage):
if ignore_until_end_of_turn:
continue

message_sent: str
is_final: bool
# TODO: consider allowing the user to interrupt the agent manually by responding fast
if random.random() < interruption_probability:
stop_idx = random.randint(0, len(agent_response.message.text))
message_sent = agent_response.message.text[:stop_idx]
stop_idx = random.randint(0, len(response.message.text))
message_sent = response.message.text[:stop_idx]
ignore_until_end_of_turn = True
is_final = False
else:
message_sent = agent_response.message.text
message_sent = response.message.text
is_final = True

agent.transcript.add_bot_message(
5 changes: 4 additions & 1 deletion tests/fixtures/synthesizer.py
@@ -1,5 +1,6 @@
import wave
from io import BytesIO
from typing import Literal

from vocode.streaming.models.message import BaseMessage
from vocode.streaming.models.synthesizer import SynthesizerConfig
@@ -17,9 +18,11 @@ def create_fake_audio(message: str, synthesizer_config: SynthesizerConfig):
return file


class TestSynthesizerConfig(SynthesizerConfig, type="synthesizer_test"):
class TestSynthesizerConfig(SynthesizerConfig):
__test__ = False

type: Literal["synthesizer_test"] = "synthesizer_test"


class TestSynthesizer(BaseSynthesizer[TestSynthesizerConfig]):
"""Accepts text and creates a SynthesisResult containing audio data which is the same as the text as bytes."""
5 changes: 4 additions & 1 deletion tests/fixtures/transcriber.py
@@ -1,12 +1,15 @@
import asyncio
from typing import Literal

from vocode.streaming.models.transcriber import TranscriberConfig
from vocode.streaming.transcriber.base_transcriber import BaseAsyncTranscriber, Transcription


class TestTranscriberConfig(TranscriberConfig, type="transcriber_test"):
class TestTranscriberConfig(TranscriberConfig):
__test__ = False

type: Literal["transcriber_test"] = "transcriber_test"


class TestAsyncTranscriber(BaseAsyncTranscriber[TestTranscriberConfig]):
"""Accepts fake audio chunks and sends out transcriptions which are the same as the audio chunks."""
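Both fixture configs also set `__test__ = False`. That flag keeps pytest from collecting classes whose names start with `Test`, and because pydantic leaves dunder-named class attributes out of a model's fields, it coexists with the new `Literal` discriminator. A small sketch of the combination (illustrative names only):

```python
from typing import Literal

from pydantic import BaseModel


class TestFixtureConfig(BaseModel):
    # Opt out of pytest collection: the class name starts with "Test", but it
    # is a config model, not a test class. Dunder attributes are not turned
    # into pydantic fields, so this stays a plain class attribute.
    __test__ = False

    type: Literal["fixture_test"] = "fixture_test"


assert TestFixtureConfig().type == "fixture_test"
```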
2 changes: 1 addition & 1 deletion tests/streaming/action/test_end_conversation.py
@@ -4,7 +4,7 @@
from uuid import UUID

import pytest
from pydantic.v1 import BaseModel
from pydantic import BaseModel
from pytest_mock import MockerFixture

from tests.fakedata.id import generate_uuid
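This file, along with several others below, only swaps `from pydantic.v1 import BaseModel` for `from pydantic import BaseModel`, moving the test helpers off the bundled v1 compatibility shim. The renames that usually follow such a swap are the v2 spellings of the common model methods; a minimal sketch with a throwaway model:

```python
import json

from pydantic import BaseModel


class Example(BaseModel):
    conversation_id: str


example = Example(conversation_id="asdf")

# pydantic v2 spellings of the common v1 calls:
assert example.model_dump() == {"conversation_id": "asdf"}                   # was .dict()
assert json.loads(example.model_dump_json()) == {"conversation_id": "asdf"}  # was .json()
assert Example.model_validate({"conversation_id": "asdf"}) == example        # was .parse_obj()
```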
27 changes: 22 additions & 5 deletions tests/streaming/agent/test_openai_utils.py
@@ -1,3 +1,7 @@
from typing import Literal

from pydantic import BaseModel

from vocode.streaming.agent.openai_utils import format_openai_chat_messages_from_transcript
from vocode.streaming.models.actions import (
ACTION_FINISHED_FORMAT_STRING,
@@ -11,7 +15,15 @@
from vocode.streaming.models.transcript import ActionFinish, ActionStart, Message, Transcript


class WeatherActionConfig(ActionConfig, type="weather"):
class WeatherActionConfig(ActionConfig):
type: Literal["weather"] = "weather"


class WeatherActionParams(BaseModel):
pass


class WeatherActionResponse(BaseModel):
pass


@@ -23,12 +35,12 @@ def test_format_openai_chat_messages_from_transcript():
test_action_input_nophrase = ActionInput(
action_config=WeatherActionConfig(),
conversation_id="asdf",
params={},
params=WeatherActionParams(),
)
test_action_input_phrase = ActionInput(
action_config=WeatherActionConfig(action_trigger=create_fake_vocode_phrase_trigger()),
conversation_id="asdf",
params={},
params=WeatherActionParams(),
)

test_cases = [
@@ -85,7 +97,9 @@ def test_format_openai_chat_messages_from_transcript():
ActionFinish(
action_type="weather",
action_input=test_action_input_nophrase,
action_output=ActionOutput(action_type="weather", response={}),
action_output=ActionOutput(
action_type="weather", response=WeatherActionResponse()
),
),
]
),
@@ -127,7 +141,9 @@ def test_format_openai_chat_messages_from_transcript():
ActionFinish(
action_type="weather",
action_input=test_action_input_phrase,
action_output=ActionOutput(action_type="weather", response={}),
action_output=ActionOutput(
action_type="weather", response=WeatherActionResponse()
),
),
]
),
@@ -301,3 +317,4 @@ def test_format_openai_chat_messages_from_transcript_context_limit():

for params, expected_output in test_cases:
assert format_openai_chat_messages_from_transcript(*params) == expected_output
assert format_openai_chat_messages_from_transcript(*params) == expected_output
2 changes: 1 addition & 1 deletion tests/streaming/agent/test_streaming_utils.py
@@ -2,7 +2,7 @@

import pytest
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
from pydantic.v1 import BaseModel
from pydantic import BaseModel

from vocode.streaming.agent.openai_utils import openai_get_tokens
from vocode.streaming.agent.streaming_utils import collate_response_async
47 changes: 47 additions & 0 deletions tests/streaming/models/test_adaptive_object.py
@@ -0,0 +1,47 @@
from abc import ABC
from typing import Any, Literal

from vocode.streaming.models.adaptive_object import AdaptiveObject


class B(AdaptiveObject, ABC):
type: Any


class SubB1(B):
type: Literal["sub_b1"] = "sub_b1"
x: int


class SubB2(B):
type: Literal["sub_b2"] = "sub_b2"
y: int


class A(AdaptiveObject, ABC):
type: Any
b: B


class SubA1(A):
type: Literal["sub_a1"] = "sub_a1"
x: int


class SubA2(A):
type: Literal["sub_a2"] = "sub_a2"
y: int


def test_serialize():
sub_a1 = SubA1(b=SubB1(x=2), x=1)
assert sub_a1.model_dump() == {"b": {"type": "sub_b1", "x": 2}, "type": "sub_a1", "x": 1}


def test_deserialize():
d = {"b": {"type": "sub_b1", "x": 2}, "type": "sub_a1", "x": 1}
sub_a1 = A.model_validate(d)
assert isinstance(sub_a1, SubA1)
assert isinstance(sub_a1.b, SubB1)
assert sub_a1.b.x == 2
assert sub_a1.x == 1
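The new test above pins down what `AdaptiveObject` is expected to do: validating against an abstract base resolves the concrete subclass from the `type` value, including for nested fields. The implementation itself is not part of this diff; as a rough, hedged illustration of how that kind of dispatch can be built on pydantic v2 (not vocode's actual code), a wrap-mode model validator on a shared base can redirect validation to the subclass whose `Literal` default matches. The serialization side, which the other test exercises, needs separate handling and is left out here.

```python
from abc import ABC
from typing import Any, Literal

from pydantic import BaseModel, model_validator


class PolymorphicModel(BaseModel):
    """Illustrative base that picks the subclass from the "type" key."""

    @model_validator(mode="wrap")
    @classmethod
    def _dispatch_on_type(cls, data: Any, handler: Any) -> Any:
        if isinstance(data, dict) and "type" in data:
            # Only immediate subclasses are checked here; a real implementation
            # would walk the whole subclass tree.
            for subclass in cls.__subclasses__():
                type_field = subclass.model_fields.get("type")
                if (
                    type_field is not None
                    and type_field.default == data["type"]
                    and subclass is not cls
                ):
                    return subclass.model_validate(data)
        return handler(data)


class Shape(PolymorphicModel, ABC):
    type: Any


class Circle(Shape):
    type: Literal["circle"] = "circle"
    radius: float


class Square(Shape):
    type: Literal["square"] = "square"
    side: float


shape = Shape.model_validate({"type": "circle", "radius": 2.0})
assert isinstance(shape, Circle)
assert shape.radius == 2.0
```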
2 changes: 1 addition & 1 deletion tests/streaming/test_streaming_conversation.py
@@ -4,7 +4,7 @@
from unittest.mock import MagicMock

import pytest
from pydantic.v1 import BaseModel
from pydantic import BaseModel
from pytest_mock import MockerFixture

from tests.fakedata.conversation import (
24 changes: 12 additions & 12 deletions tests/streaming/utils/test_events_manager.py
@@ -2,7 +2,7 @@

import pytest

from vocode.streaming.models.events import EventType, PhoneCallEndedEvent
from vocode.streaming.models.events import PhoneCallEndedEvent
from vocode.streaming.utils.events_manager import EventsManager

CONVERSATION_ID = "1"
@@ -19,26 +19,26 @@ async def test_initialization():
@pytest.mark.asyncio
async def test_publish_event():
event = PhoneCallEndedEvent(
conversation_id=CONVERSATION_ID, type=EventType.PHONE_CALL_ENDED
conversation_id=CONVERSATION_ID, type="event_phone_call_ended"
) # Replace with actual Event creation
manager = EventsManager([EventType.PHONE_CALL_ENDED])
manager = EventsManager(["event_phone_call_ended"])
manager.publish_event(event)
assert not manager.queue.empty()


@pytest.mark.asyncio
async def test_handle_event_default_implementation():
event = PhoneCallEndedEvent(
conversation_id=CONVERSATION_ID, type=EventType.PHONE_CALL_ENDED
conversation_id=CONVERSATION_ID, type="event_phone_call_ended"
) # Replace with actual Event creation
manager = EventsManager([EventType.PHONE_CALL_ENDED])
manager = EventsManager(["event_phone_call_ended"])
await manager.handle_event(event)


@pytest.mark.asyncio
async def test_handle_event_non_async_override(mocker):
event = PhoneCallEndedEvent(conversation_id=CONVERSATION_ID, type=EventType.PHONE_CALL_ENDED)
manager = EventsManager([EventType.PHONE_CALL_ENDED])
event = PhoneCallEndedEvent(conversation_id=CONVERSATION_ID, type="event_phone_call_ended")
manager = EventsManager(["event_phone_call_ended"])
manager.publish_event(event)

error_logger_mock = mocker.patch("vocode.streaming.utils.events_manager.logger.error")
@@ -53,9 +53,9 @@ async def test_handle_event_non_async_override(mocker):
@pytest.mark.asyncio
async def test_start_and_active_loop():
event = PhoneCallEndedEvent(
conversation_id=CONVERSATION_ID, type=EventType.PHONE_CALL_ENDED
conversation_id=CONVERSATION_ID, type="event_phone_call_ended"
) # Replace with actual Event creation
manager = EventsManager([EventType.PHONE_CALL_ENDED])
manager = EventsManager(["event_phone_call_ended"])
asyncio.create_task(manager.start())
manager.publish_event(event)
await asyncio.sleep(0.1)
@@ -64,8 +64,8 @@

@pytest.mark.asyncio
async def test_flush_method():
event = PhoneCallEndedEvent(conversation_id=CONVERSATION_ID, type=EventType.PHONE_CALL_ENDED)
manager = EventsManager([EventType.PHONE_CALL_ENDED])
event = PhoneCallEndedEvent(conversation_id=CONVERSATION_ID, type="event_phone_call_ended")
manager = EventsManager(["event_phone_call_ended"])
for _ in range(5):
manager.publish_event(event)
await manager.flush()
@@ -74,6 +74,6 @@

@pytest.mark.asyncio
async def test_queue_empty_and_timeout():
manager = EventsManager([EventType.TRANSCRIPT])
manager = EventsManager(["event_transcript"])
await manager.flush()
assert manager.queue.empty()
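These tests now pass plain strings both to the event constructors and to `EventsManager`. Assuming the reworked event models declare `type` as a `Literal` with a default, as the other models in this PR do, the explicit `type=` argument is only kept for symmetry with the old enum; a minimal, test-style sketch with a stand-in event class:

```python
from typing import Literal

import pytest
from pydantic import BaseModel, ValidationError


class FakePhoneCallEndedEvent(BaseModel):
    conversation_id: str
    type: Literal["event_phone_call_ended"] = "event_phone_call_ended"


def test_literal_type_defaults_and_validates():
    # The Literal default makes the explicit type= argument optional...
    event = FakePhoneCallEndedEvent(conversation_id="1")
    assert event.type == "event_phone_call_ended"

    # ...and a typo that the old EventType enum would have caught statically
    # is still rejected, just at validation time instead.
    with pytest.raises(ValidationError):
        FakePhoneCallEndedEvent(conversation_id="1", type="event_phone_call_end")
```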