diff --git a/alembic/versions/22a6e413d89c_remove_module_field_on_tool.py b/alembic/versions/22a6e413d89c_remove_module_field_on_tool.py new file mode 100644 index 0000000000..8d05aabeaf --- /dev/null +++ b/alembic/versions/22a6e413d89c_remove_module_field_on_tool.py @@ -0,0 +1,31 @@ +"""Remove module field on tool + +Revision ID: 22a6e413d89c +Revises: 88f9432739a9 +Create Date: 2025-01-10 17:38:23.811795 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "22a6e413d89c" +down_revision: Union[str, None] = "88f9432739a9" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("tools", "module") + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.add_column("tools", sa.Column("module", sa.VARCHAR(), autoincrement=False, nullable=True)) + # ### end Alembic commands ### diff --git a/alembic/versions/7778731d15e2_added_jobusagestatistics_table.py b/alembic/versions/7778731d15e2_added_jobusagestatistics_table.py new file mode 100644 index 0000000000..92c3302d27 --- /dev/null +++ b/alembic/versions/7778731d15e2_added_jobusagestatistics_table.py @@ -0,0 +1,53 @@ +"""Added JobUsageStatistics table + +Revision ID: 7778731d15e2 +Revises: 8d70372ad130 +Create Date: 2025-01-09 13:20:25.555740 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = "7778731d15e2" +down_revision: Union[str, None] = "8d70372ad130" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Create job_usage_statistics table + op.create_table( + "job_usage_statistics", + sa.Column("id", sa.Integer(), nullable=False), + sa.Column("job_id", sa.String(), nullable=False), + sa.Column("step_id", sa.String(), nullable=True), + sa.Column("completion_tokens", sa.Integer(), server_default=sa.text("0"), nullable=False), + sa.Column("prompt_tokens", sa.Integer(), server_default=sa.text("0"), nullable=False), + sa.Column("total_tokens", sa.Integer(), server_default=sa.text("0"), nullable=False), + sa.Column("step_count", sa.Integer(), server_default=sa.text("0"), nullable=False), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True), + sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False), + sa.Column("_created_by_id", sa.String(), nullable=True), + sa.Column("_last_updated_by_id", sa.String(), nullable=True), + sa.ForeignKeyConstraint(["job_id"], ["jobs.id"], name="fk_job_usage_statistics_job_id", ondelete="CASCADE"), + sa.PrimaryKeyConstraint("id", name="pk_job_usage_statistics"), + ) + + # Create indexes + op.create_index("ix_job_usage_statistics_created_at", "job_usage_statistics", ["created_at"]) + op.create_index("ix_job_usage_statistics_job_id", "job_usage_statistics", ["job_id"]) + + +def downgrade() -> None: + # Drop indexes + op.drop_index("ix_job_usage_statistics_created_at", "job_usage_statistics") + op.drop_index("ix_job_usage_statistics_job_id", "job_usage_statistics") + + # Drop table + op.drop_table("job_usage_statistics") diff --git a/alembic/versions/7f652fdd3dba_change_jobmessage_unique_constraint_to_.py 
b/alembic/versions/7f652fdd3dba_change_jobmessage_unique_constraint_to_.py new file mode 100644 index 0000000000..b1be20b113 --- /dev/null +++ b/alembic/versions/7f652fdd3dba_change_jobmessage_unique_constraint_to_.py @@ -0,0 +1,33 @@ +"""change JobMessage unique constraint to (job_id,message_id) + +Revision ID: 7f652fdd3dba +Revises: 22a6e413d89c +Create Date: 2025-01-13 14:36:13.626344 + +""" + +from typing import Sequence, Union + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "7f652fdd3dba" +down_revision: Union[str, None] = "22a6e413d89c" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Drop the old unique constraint + op.drop_constraint("uq_job_messages_message_id", "job_messages", type_="unique") + + # Add the new composite unique constraint + op.create_unique_constraint("unique_job_message", "job_messages", ["job_id", "message_id"]) + + +def downgrade() -> None: + # Drop the new composite constraint + op.drop_constraint("unique_job_message", "job_messages", type_="unique") + + # Restore the old unique constraint + op.create_unique_constraint("uq_job_messages_message_id", "job_messages", ["message_id"]) diff --git a/alembic/versions/88f9432739a9_add_jobtype_to_job_table.py b/alembic/versions/88f9432739a9_add_jobtype_to_job_table.py new file mode 100644 index 0000000000..199173dbc7 --- /dev/null +++ b/alembic/versions/88f9432739a9_add_jobtype_to_job_table.py @@ -0,0 +1,37 @@ +"""add JobType to Job table + +Revision ID: 88f9432739a9 +Revises: 7778731d15e2 +Create Date: 2025-01-10 13:46:44.089110 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa + +from alembic import op +from letta.orm.enums import JobType + +# revision identifiers, used by Alembic. 
+revision: str = "88f9432739a9" +down_revision: Union[str, None] = "7778731d15e2" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Add job_type column with default value + op.add_column("jobs", sa.Column("job_type", sa.String(), nullable=True)) + + # Set existing rows to have the default value of JobType.JOB + job_value = JobType.JOB.value + op.execute(f"UPDATE jobs SET job_type = '{job_value}' WHERE job_type IS NULL") + + # Make the column non-nullable after setting default values + op.alter_column("jobs", "job_type", existing_type=sa.String(), nullable=False) + + +def downgrade() -> None: + # Remove the job_type column + op.drop_column("jobs", "job_type") diff --git a/alembic/versions/8d70372ad130_adding_jobmessages_table.py b/alembic/versions/8d70372ad130_adding_jobmessages_table.py new file mode 100644 index 0000000000..6df8c8621a --- /dev/null +++ b/alembic/versions/8d70372ad130_adding_jobmessages_table.py @@ -0,0 +1,47 @@ +"""adding JobMessages table + +Revision ID: 8d70372ad130 +Revises: cdb3db091113 +Create Date: 2025-01-08 17:57:20.325596 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = "8d70372ad130" +down_revision: Union[str, None] = "cdb3db091113" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.create_table( + "job_messages", + sa.Column("id", sa.Integer(), nullable=False), + sa.Column("job_id", sa.String(), nullable=False), + sa.Column("message_id", sa.String(), nullable=False), + sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True), + sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False), + sa.Column("_created_by_id", sa.String(), nullable=True), + sa.Column("_last_updated_by_id", sa.String(), nullable=True), + sa.ForeignKeyConstraint(["job_id"], ["jobs.id"], name="fk_job_messages_job_id", ondelete="CASCADE"), + sa.ForeignKeyConstraint(["message_id"], ["messages.id"], name="fk_job_messages_message_id", ondelete="CASCADE", use_alter=True), + sa.PrimaryKeyConstraint("id", name="pk_job_messages"), + sa.UniqueConstraint("message_id", name="uq_job_messages_message_id"), + ) + + # Add indexes + op.create_index("ix_job_messages_job_id", "job_messages", ["job_id"], unique=False) + op.create_index("ix_job_messages_created_at", "job_messages", ["created_at"], unique=False) + + +def downgrade() -> None: + op.drop_index("ix_job_messages_created_at", "job_messages") + op.drop_index("ix_job_messages_job_id", "job_messages") + op.drop_table("job_messages") diff --git a/alembic/versions/f595e0e8013e_adding_request_config_to_job_table.py b/alembic/versions/f595e0e8013e_adding_request_config_to_job_table.py new file mode 100644 index 0000000000..d53a30a233 --- /dev/null +++ b/alembic/versions/f595e0e8013e_adding_request_config_to_job_table.py @@ -0,0 +1,31 @@ +"""adding request_config to Job table + +Revision ID: f595e0e8013e +Revises: 7f652fdd3dba +Create Date: 2025-01-14 
14:34:34.203363 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "f595e0e8013e" +down_revision: Union[str, None] = "7f652fdd3dba" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.add_column("jobs", sa.Column("request_config", sa.JSON, nullable=True)) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("jobs", "request_config") + # ### end Alembic commands ### diff --git a/examples/tool_rule_usage.py b/examples/tool_rule_usage.py index 54e051e2d7..8ec061d0c7 100644 --- a/examples/tool_rule_usage.py +++ b/examples/tool_rule_usage.py @@ -6,7 +6,7 @@ from letta.schemas.tool_rule import ChildToolRule, InitToolRule, TerminalToolRule from tests.helpers.endpoints_helper import assert_invoked_send_message_with_keyword, setup_agent from tests.helpers.utils import cleanup -from tests.test_model_letta_perfomance import llm_config_dir +from tests.test_model_letta_performance import llm_config_dir """ This example shows how you can constrain tool calls in your agent. 
diff --git a/letta/__init__.py b/letta/__init__.py index d681a255e8..dd79db9586 100644 --- a/letta/__init__.py +++ b/letta/__init__.py @@ -1,5 +1,6 @@ __version__ = "0.6.9" + # import clients from letta.client.client import LocalClient, RESTClient, create_client diff --git a/letta/agent.py b/letta/agent.py index ebf21b82a5..913b46b89c 100644 --- a/letta/agent.py +++ b/letta/agent.py @@ -12,6 +12,7 @@ FIRST_MESSAGE_ATTEMPTS, FUNC_FAILED_HEARTBEAT_MESSAGE, LETTA_CORE_TOOL_MODULE_NAME, + LETTA_MULTI_AGENT_TOOL_MODULE_NAME, LLM_MAX_TOKENS, MESSAGE_SUMMARY_TRUNC_KEEP_N_LAST, MESSAGE_SUMMARY_TRUNC_TOKEN_FRAC, @@ -25,6 +26,7 @@ from letta.llm_api.helpers import is_context_overflow_error from letta.llm_api.llm_api_tools import create from letta.local_llm.utils import num_tokens_from_functions, num_tokens_from_messages +from letta.log import get_logger from letta.memory import summarize_messages from letta.orm import User from letta.orm.enums import ToolType @@ -44,6 +46,7 @@ from letta.services.agent_manager import AgentManager from letta.services.block_manager import BlockManager from letta.services.helpers.agent_manager_helper import check_supports_structured_output, compile_memory_metadata_block +from letta.services.job_manager import JobManager from letta.services.message_manager import MessageManager from letta.services.passage_manager import PassageManager from letta.services.tool_execution_sandbox import ToolExecutionSandbox @@ -128,6 +131,7 @@ def __init__( self.message_manager = MessageManager() self.passage_manager = PassageManager() self.agent_manager = AgentManager() + self.job_manager = JobManager() # State needed for heartbeat pausing @@ -141,6 +145,9 @@ def __init__( # Load last function response from message history self.last_function_response = self.load_last_function_response() + # Logger that the Agent specifically can use, will also report the agent_state ID with the logs + self.logger = get_logger(agent_state.id) + def 
load_last_function_response(self): """Load the last function response from message history""" in_context_messages = self.agent_manager.get_in_context_messages(agent_id=self.agent_state.id, actor=self.user) @@ -205,6 +212,10 @@ def execute_tool_and_persist_state(self, function_name: str, function_args: dict callable_func = get_function_from_module(LETTA_CORE_TOOL_MODULE_NAME, function_name) function_args["self"] = self # need to attach self to arg since it's dynamically linked function_response = callable_func(**function_args) + elif target_letta_tool.tool_type == ToolType.LETTA_MULTI_AGENT_CORE: + callable_func = get_function_from_module(LETTA_MULTI_AGENT_TOOL_MODULE_NAME, function_name) + function_args["self"] = self # need to attach self to arg since it's dynamically linked + function_response = callable_func(**function_args) elif target_letta_tool.tool_type == ToolType.LETTA_MEMORY_CORE: callable_func = get_function_from_module(LETTA_CORE_TOOL_MODULE_NAME, function_name) agent_state_copy = self.agent_state.__deepcopy__() @@ -675,11 +686,15 @@ def inner_step( skip_verify: bool = False, stream: bool = False, # TODO move to config? 
step_count: Optional[int] = None, + metadata: Optional[dict] = None, ) -> AgentStepResponse: """Runs a single step in the agent loop (generates at most one LLM call)""" try: + # Extract job_id from metadata if present + job_id = metadata.get("job_id") if metadata else None + # Step 0: update core memory # only pulling latest block data if shared memory is being used current_persisted_memory = Memory( @@ -754,9 +769,17 @@ def inner_step( f"last response total_tokens ({current_total_tokens}) < {MESSAGE_SUMMARY_WARNING_FRAC * int(self.agent_state.llm_config.context_window)}" ) + # Persisting into Messages self.agent_state = self.agent_manager.append_to_in_context_messages( all_new_messages, agent_id=self.agent_state.id, actor=self.user ) + if job_id: + for message in all_new_messages: + self.job_manager.add_message_to_job( + job_id=job_id, + message_id=message.id, + actor=self.user, + ) return AgentStepResponse( messages=all_new_messages, @@ -784,6 +807,7 @@ def inner_step( first_message_retry_limit=first_message_retry_limit, skip_verify=skip_verify, stream=stream, + metadata=metadata, ) else: diff --git a/letta/client/client.py b/letta/client/client.py index 5de076e43c..686171e258 100644 --- a/letta/client/client.py +++ b/letta/client/client.py @@ -22,14 +22,17 @@ ) from letta.schemas.file import FileMetadata from letta.schemas.job import Job +from letta.schemas.letta_message import LettaMessage, LettaMessageUnion from letta.schemas.letta_request import LettaRequest, LettaStreamingRequest from letta.schemas.letta_response import LettaResponse, LettaStreamingResponse from letta.schemas.llm_config import LLMConfig from letta.schemas.memory import ArchivalMemorySummary, ChatMemory, CreateArchivalMemory, Memory, RecallMemorySummary from letta.schemas.message import Message, MessageCreate, MessageUpdate +from letta.schemas.openai.chat_completion_response import UsageStatistics from letta.schemas.openai.chat_completions import ToolCall from letta.schemas.organization 
import Organization from letta.schemas.passage import Passage +from letta.schemas.run import Run from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfig, SandboxConfigCreate, SandboxConfigUpdate from letta.schemas.source import Source, SourceCreate, SourceUpdate from letta.schemas.tool import Tool, ToolCreate, ToolUpdate @@ -433,11 +436,22 @@ def __init__( self._default_llm_config = default_llm_config self._default_embedding_config = default_embedding_config - def list_agents(self, tags: Optional[List[str]] = None, match_all_tags: bool = False) -> List[AgentState]: - params = {"match_all_tags": match_all_tags} + def list_agents( + self, tags: Optional[List[str]] = None, query_text: Optional[str] = None, limit: int = 50, cursor: Optional[str] = None + ) -> List[AgentState]: + params = {"limit": limit} if tags: params["tags"] = tags + params["match_all_tags"] = False + + if query_text: + params["query_text"] = query_text + + if cursor: + params["cursor"] = cursor + response = requests.get(f"{self.base_url}/{self.api_prefix}/agents", headers=self.headers, params=params) + print(f"\nLIST RESPONSE\n{response.json()}\n") return [AgentState(**agent) for agent in response.json()] def agent_exists(self, agent_id: str) -> bool: @@ -543,6 +557,7 @@ def create_agent( "embedding_config": embedding_config if embedding_config else self._default_embedding_config, "initial_message_sequence": initial_message_sequence, "tags": tags, + "include_base_tools": include_base_tools, } # Only add name if it's not None @@ -983,7 +998,7 @@ def send_message_async( role: str, agent_id: Optional[str] = None, name: Optional[str] = None, - ) -> Job: + ) -> Run: """ Send a message to an agent (async, returns a job) @@ -1006,7 +1021,7 @@ def send_message_async( ) if response.status_code != 200: raise ValueError(f"Failed to send message: {response.text}") - response = Job(**response.json()) + response = Run(**response.json()) return response @@ -1980,6 +1995,153 @@ 
def update_block( raise ValueError(f"Failed to update block: {response.text}") return Block(**response.json()) + def get_run_messages( + self, + run_id: str, + cursor: Optional[str] = None, + limit: Optional[int] = 100, + ascending: bool = True, + role: Optional[MessageRole] = None, + ) -> List[LettaMessageUnion]: + """ + Get messages associated with a job with filtering options. + + Args: + job_id: ID of the job + cursor: Cursor for pagination + limit: Maximum number of messages to return + ascending: Sort order by creation time + role: Filter by message role (user/assistant/system/tool) + Returns: + List of messages matching the filter criteria + """ + params = { + "cursor": cursor, + "limit": limit, + "ascending": ascending, + "role": role, + } + # Remove None values + params = {k: v for k, v in params.items() if v is not None} + + response = requests.get(f"{self.base_url}/{self.api_prefix}/runs/{run_id}/messages", params=params) + if response.status_code != 200: + raise ValueError(f"Failed to get run messages: {response.text}") + return [LettaMessage(**message) for message in response.json()] + + def get_run_usage( + self, + run_id: str, + ) -> List[UsageStatistics]: + """ + Get usage statistics associated with a job. + + Args: + job_id (str): ID of the job + + Returns: + List[UsageStatistics]: List of usage statistics associated with the job + """ + response = requests.get( + f"{self.base_url}/{self.api_prefix}/runs/{run_id}/usage", + headers=self.headers, + ) + if response.status_code != 200: + raise ValueError(f"Failed to get run usage statistics: {response.text}") + return [UsageStatistics(**stat) for stat in [response.json()]] + + def get_run(self, run_id: str) -> Run: + """ + Get a run by ID. 
+ + Args: + run_id (str): ID of the run + + Returns: + run (Run): Run + """ + response = requests.get( + f"{self.base_url}/{self.api_prefix}/runs/{run_id}", + headers=self.headers, + ) + if response.status_code != 200: + raise ValueError(f"Failed to get run: {response.text}") + return Run(**response.json()) + + def delete_run(self, run_id: str) -> None: + """ + Delete a run by ID. + + Args: + run_id (str): ID of the run + """ + response = requests.delete( + f"{self.base_url}/{self.api_prefix}/runs/{run_id}", + headers=self.headers, + ) + if response.status_code != 200: + raise ValueError(f"Failed to delete run: {response.text}") + + def list_runs(self) -> List[Run]: + """ + List all runs. + + Returns: + runs (List[Run]): List of runs + """ + response = requests.get( + f"{self.base_url}/{self.api_prefix}/runs", + headers=self.headers, + ) + if response.status_code != 200: + raise ValueError(f"Failed to list runs: {response.text}") + return [Run(**run) for run in response.json()] + + def list_active_runs(self) -> List[Run]: + """ + List all active runs. + + Returns: + runs (List[Run]): List of active runs + """ + response = requests.get( + f"{self.base_url}/{self.api_prefix}/runs/active", + headers=self.headers, + ) + if response.status_code != 200: + raise ValueError(f"Failed to list active runs: {response.text}") + return [Run(**run) for run in response.json()] + + def get_tags( + self, + cursor: Optional[str] = None, + limit: Optional[int] = None, + query_text: Optional[str] = None, + ) -> List[str]: + """ + Get a list of all unique tags. 
+ + Args: + cursor: Optional cursor for pagination (last tag seen) + limit: Optional maximum number of tags to return + query_text: Optional text to filter tags + + Returns: + List[str]: List of unique tags + """ + params = {} + if cursor: + params["cursor"] = cursor + if limit: + params["limit"] = limit + if query_text: + params["query_text"] = query_text + + response = requests.get(f"{self.base_url}/{self.api_prefix}/tags", headers=self.headers, params=params) + if response.status_code != 200: + raise ValueError(f"Failed to get tags: {response.text}") + return response.json() + class LocalClient(AbstractClient): """ @@ -2038,10 +2200,12 @@ def __init__( self.organization = self.server.get_organization_or_default(self.org_id) # agents - def list_agents(self, tags: Optional[List[str]] = None, match_all_tags: bool = False) -> List[AgentState]: + def list_agents( + self, query_text: Optional[str] = None, tags: Optional[List[str]] = None, limit: int = 100, cursor: Optional[str] = None + ) -> List[AgentState]: self.interface.clear() - return self.server.agent_manager.list_agents(actor=self.user, tags=tags, match_all_tags=match_all_tags) + return self.server.agent_manager.list_agents(actor=self.user, tags=tags, query_text=query_text, limit=limit, cursor=cursor) def agent_exists(self, agent_id: Optional[str] = None, agent_name: Optional[str] = None) -> bool: """ @@ -2087,6 +2251,7 @@ def create_agent( tool_ids: Optional[List[str]] = None, tool_rules: Optional[List[BaseToolRule]] = None, include_base_tools: Optional[bool] = True, + include_multi_agent_tools: bool = False, # metadata metadata: Optional[Dict] = {"human:": DEFAULT_HUMAN, "persona": DEFAULT_PERSONA}, description: Optional[str] = None, @@ -2104,6 +2269,7 @@ def create_agent( tools (List[str]): List of tools tool_rules (Optional[List[BaseToolRule]]): List of tool rules include_base_tools (bool): Include base tools + include_multi_agent_tools (bool): Include multi agent tools metadata (Dict): Metadata 
description (str): Description tags (List[str]): Tags for filtering agents @@ -2113,11 +2279,6 @@ def create_agent( """ # construct list of tools tool_ids = tool_ids or [] - tool_names = [] - if include_base_tools: - tool_names += BASE_TOOLS - tool_names += BASE_MEMORY_TOOLS - tool_ids += [self.server.tool_manager.get_tool_by_name(tool_name=name, actor=self.user).id for name in tool_names] # check if default configs are provided assert embedding_config or self._default_embedding_config, f"Embedding config must be provided" @@ -2140,6 +2301,7 @@ def create_agent( "tool_ids": tool_ids, "tool_rules": tool_rules, "include_base_tools": include_base_tools, + "include_multi_agent_tools": include_multi_agent_tools, "system": system, "agent_type": agent_type, "llm_config": llm_config if llm_config else self._default_llm_config, @@ -3433,3 +3595,104 @@ def update_block( if label: data["label"] = label return self.server.block_manager.update_block(block_id, actor=self.user, block_update=BlockUpdate(**data)) + + def get_run_messages( + self, + run_id: str, + cursor: Optional[str] = None, + limit: Optional[int] = 100, + ascending: bool = True, + role: Optional[MessageRole] = None, + ) -> List[LettaMessageUnion]: + """ + Get messages associated with a job with filtering options. + + Args: + run_id: ID of the run + cursor: Cursor for pagination + limit: Maximum number of messages to return + ascending: Sort order by creation time + role: Filter by message role (user/assistant/system/tool) + + Returns: + List of messages matching the filter criteria + """ + params = { + "cursor": cursor, + "limit": limit, + "ascending": ascending, + "role": role, + } + return self.server.job_manager.get_run_messages_cursor(run_id=run_id, actor=self.user, **params) + + def get_run_usage( + self, + run_id: str, + ) -> List[UsageStatistics]: + """ + Get usage statistics associated with a job. 
+ + Args: + run_id (str): ID of the run + + Returns: + List[UsageStatistics]: List of usage statistics associated with the run + """ + usage = self.server.job_manager.get_job_usage(job_id=run_id, actor=self.user) + return [ + UsageStatistics(completion_tokens=stat.completion_tokens, prompt_tokens=stat.prompt_tokens, total_tokens=stat.total_tokens) + for stat in usage + ] + + def get_run(self, run_id: str) -> Run: + """ + Get a run by ID. + + Args: + run_id (str): ID of the run + + Returns: + run (Run): Run + """ + return self.server.job_manager.get_job_by_id(job_id=run_id, actor=self.user) + + def delete_run(self, run_id: str) -> None: + """ + Delete a run by ID. + + Args: + run_id (str): ID of the run + """ + return self.server.job_manager.delete_job_by_id(job_id=run_id, actor=self.user) + + def list_runs(self) -> List[Run]: + """ + List all runs. + + Returns: + runs (List[Run]): List of runs + """ + return self.server.job_manager.list_jobs(actor=self.user, job_type=JobType.RUN) + + def list_active_runs(self) -> List[Run]: + """ + List all active runs. + + Returns: + runs (List[Run]): List of active runs + """ + return self.server.job_manager.list_jobs(actor=self.user, job_type=JobType.RUN, statuses=[JobStatus.created, JobStatus.running]) + + def get_tags( + self, + cursor: str = None, + limit: int = 100, + query_text: str = None, + ) -> List[str]: + """ + Get all tags. 
+ + Returns: + tags (List[str]): List of tags + """ + return self.server.agent_manager.list_tags(actor=self.user, cursor=cursor, limit=limit, query_text=query_text) diff --git a/letta/constants.py b/letta/constants.py index d1a18e371a..0b46202aa2 100644 --- a/letta/constants.py +++ b/letta/constants.py @@ -12,6 +12,7 @@ COMPOSIO_TOOL_TAG_NAME = "composio" LETTA_CORE_TOOL_MODULE_NAME = "letta.functions.function_sets.base" +LETTA_MULTI_AGENT_TOOL_MODULE_NAME = "letta.functions.function_sets.multi_agent" # String in the error message for when the context window is too large # Example full message: @@ -48,6 +49,10 @@ BASE_TOOLS = ["send_message", "conversation_search", "archival_memory_insert", "archival_memory_search"] # Base memory tools CAN be edited, and are added by default by the server BASE_MEMORY_TOOLS = ["core_memory_append", "core_memory_replace"] +# Multi agent tools +MULTI_AGENT_TOOLS = ["send_message_to_specific_agent", "send_message_to_agents_matching_all_tags"] +MULTI_AGENT_SEND_MESSAGE_MAX_RETRIES = 3 +MULTI_AGENT_SEND_MESSAGE_TIMEOUT = 20 * 60 # The name of the tool used to send message to the user # May not be relevant in cases where the agent has multiple ways to message to user (send_imessage, send_discord_mesasge, ...) 
diff --git a/letta/functions/function_sets/multi_agent.py b/letta/functions/function_sets/multi_agent.py new file mode 100644 index 0000000000..015ac9c1ac --- /dev/null +++ b/letta/functions/function_sets/multi_agent.py @@ -0,0 +1,96 @@ +import asyncio +from typing import TYPE_CHECKING, List, Optional + +from letta.constants import MULTI_AGENT_SEND_MESSAGE_MAX_RETRIES, MULTI_AGENT_SEND_MESSAGE_TIMEOUT +from letta.functions.helpers import async_send_message_with_retries +from letta.orm.errors import NoResultFound +from letta.server.rest_api.utils import get_letta_server + +if TYPE_CHECKING: + from letta.agent import Agent + + +def send_message_to_specific_agent(self: "Agent", message: str, other_agent_id: str) -> Optional[str]: + """ + Send a message to a specific Letta agent within the same organization. + + Args: + message (str): The message to be sent to the target Letta agent. + other_agent_id (str): The identifier of the target Letta agent. + + Returns: + Optional[str]: The response from the Letta agent. It's possible that the agent does not respond. + """ + server = get_letta_server() + + # Ensure the target agent is in the same org + try: + server.agent_manager.get_agent_by_id(agent_id=other_agent_id, actor=self.user) + except NoResultFound: + raise ValueError( + f"The passed-in agent_id {other_agent_id} either does not exist, " + f"or does not belong to the same org ({self.user.organization_id})." 
+ ) + + # Async logic to send a message with retries and timeout + async def async_send_single_agent(): + return await async_send_message_with_retries( + server=server, + sender_agent=self, + target_agent_id=other_agent_id, + message_text=message, + max_retries=MULTI_AGENT_SEND_MESSAGE_MAX_RETRIES, # or your chosen constants + timeout=MULTI_AGENT_SEND_MESSAGE_TIMEOUT, # e.g., 1200 for 20 min + logging_prefix="[send_message_to_specific_agent]", + ) + + # Run in the current event loop or create one if needed + try: + return asyncio.run(async_send_single_agent()) + except RuntimeError: + # e.g., in case there's already an active loop + loop = asyncio.get_event_loop() + if loop.is_running(): + return loop.run_until_complete(async_send_single_agent()) + else: + raise + + +def send_message_to_agents_matching_all_tags(self: "Agent", message: str, tags: List[str]) -> List[str]: + """ + Send a message to all agents in the same organization that match ALL of the given tags. + + Messages are sent in parallel for improved performance, with retries on flaky calls and timeouts for long-running requests. + This function does not use a cursor (pagination) and enforces a limit of 100 agents. + + Args: + message (str): The message to be sent to each matching agent. + tags (List[str]): The list of tags that each agent must have (match_all_tags=True). + + Returns: + List[str]: A list of responses from the agents that match all tags. + Each response corresponds to one agent. 
+ """ + server = get_letta_server() + + # Retrieve agents that match ALL specified tags + matching_agents = server.agent_manager.list_agents(actor=self.user, tags=tags, match_all_tags=True, cursor=None, limit=100) + + async def send_messages_to_all_agents(): + tasks = [ + async_send_message_with_retries( + server=server, + sender_agent=self, + target_agent_id=agent_state.id, + message_text=message, + max_retries=MULTI_AGENT_SEND_MESSAGE_MAX_RETRIES, + timeout=MULTI_AGENT_SEND_MESSAGE_TIMEOUT, + logging_prefix="[send_message_to_agents_matching_all_tags]", + ) + for agent_state in matching_agents + ] + # Run all tasks in parallel + return await asyncio.gather(*tasks) + + # Run the async function and return results + return asyncio.run(send_messages_to_all_agents()) diff --git a/letta/functions/helpers.py b/letta/functions/helpers.py index c03751a21e..cbdb50012a 100644 --- a/letta/functions/helpers.py +++ b/letta/functions/helpers.py @@ -1,10 +1,15 @@ +import json from typing import Any, Optional, Union import humps from composio.constants import DEFAULT_ENTITY_ID from pydantic import BaseModel -from letta.constants import COMPOSIO_ENTITY_ENV_VAR_KEY +from letta.constants import COMPOSIO_ENTITY_ENV_VAR_KEY, DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG +from letta.schemas.enums import MessageRole +from letta.schemas.letta_message import AssistantMessage, ReasoningMessage, ToolCallMessage +from letta.schemas.letta_response import LettaResponse +from letta.schemas.message import MessageCreate def generate_composio_tool_wrapper(action_name: str) -> tuple[str, str]: @@ -206,3 +211,102 @@ def generate_import_code(module_attr_map: Optional[dict]): code_lines.append(f" # Access the {attr} from the module") code_lines.append(f" {attr} = getattr({module_name}, '{attr}')") return "\n".join(code_lines) + + +def parse_letta_response_for_assistant_message( + letta_response: LettaResponse, + assistant_message_tool_name: str = DEFAULT_MESSAGE_TOOL, + assistant_message_tool_kwarg: 
str = DEFAULT_MESSAGE_TOOL_KWARG, +) -> Optional[str]: + reasoning_message = "" + for m in letta_response.messages: + if isinstance(m, AssistantMessage): + return m.assistant_message + elif isinstance(m, ToolCallMessage) and m.tool_call.name == assistant_message_tool_name: + try: + return json.loads(m.tool_call.arguments)[assistant_message_tool_kwarg] + except Exception: # TODO: Make this more specific + continue + elif isinstance(m, ReasoningMessage): + # This is not ideal, but we would like to return something rather than nothing + reasoning_message += f"{m.reasoning}\n" + + return None + + +import asyncio +from random import uniform +from typing import Optional + + +async def async_send_message_with_retries( + server, + sender_agent: "Agent", + target_agent_id: str, + message_text: str, + max_retries: int, + timeout: int, + logging_prefix: Optional[str] = None, +) -> str: + """ + Shared helper coroutine to send a message to an agent with retries and a timeout. + + Args: + server: The Letta server instance (from get_letta_server()). + sender_agent (Agent): The agent initiating the send action. + target_agent_id (str): The ID of the agent to send the message to. + message_text (str): The text to send as the user message. + max_retries (int): Maximum number of retries for the request. + timeout (int): Maximum time to wait for a response (in seconds). + logging_prefix (str): A prefix to append to logging + Returns: + str: The response or an error message. 
+ """ + logging_prefix = logging_prefix or "[async_send_message_with_retries]" + for attempt in range(1, max_retries + 1): + try: + messages = [MessageCreate(role=MessageRole.user, text=message_text, name=sender_agent.agent_state.name)] + # Wrap in a timeout + response = await asyncio.wait_for( + server.send_message_to_agent( + agent_id=target_agent_id, + actor=sender_agent.user, + messages=messages, + stream_steps=False, + stream_tokens=False, + use_assistant_message=True, + assistant_message_tool_name=DEFAULT_MESSAGE_TOOL, + assistant_message_tool_kwarg=DEFAULT_MESSAGE_TOOL_KWARG, + ), + timeout=timeout, + ) + + # Extract assistant message + assistant_message = parse_letta_response_for_assistant_message( + response, + assistant_message_tool_name=DEFAULT_MESSAGE_TOOL, + assistant_message_tool_kwarg=DEFAULT_MESSAGE_TOOL_KWARG, + ) + if assistant_message: + msg = f"Agent {target_agent_id} said '{assistant_message}'" + sender_agent.logger.info(f"{logging_prefix} - {msg}") + return msg + else: + msg = f"(No response from agent {target_agent_id})" + sender_agent.logger.info(f"{logging_prefix} - {msg}") + return msg + except asyncio.TimeoutError: + error_msg = f"(Timeout on attempt {attempt}/{max_retries} for agent {target_agent_id})" + sender_agent.logger.warning(f"{logging_prefix} - {error_msg}") + except Exception as e: + error_msg = f"(Error on attempt {attempt}/{max_retries} for agent {target_agent_id}: {e})" + sender_agent.logger.warning(f"{logging_prefix} - {error_msg}") + + # Exponential backoff before retrying + if attempt < max_retries: + backoff = uniform(0.5, 2) * (2**attempt) + sender_agent.logger.warning(f"{logging_prefix} - Retrying the agent to agent send_message...sleeping for {backoff}") + await asyncio.sleep(backoff) + else: + sender_agent.logger.error(f"{logging_prefix} - Fatal error during agent to agent send_message: {error_msg}") + return error_msg diff --git a/letta/functions/schema_generator.py b/letta/functions/schema_generator.py index 
5ba9d2bfdc..1f33d87d61 100644 --- a/letta/functions/schema_generator.py +++ b/letta/functions/schema_generator.py @@ -1,4 +1,5 @@ import inspect +import warnings from typing import Any, Dict, List, Optional, Type, Union, get_args, get_origin from docstring_parser import parse @@ -44,6 +45,13 @@ def type_to_json_schema_type(py_type) -> dict: origin = get_origin(py_type) if py_type == list or origin in (list, List): args = get_args(py_type) + if len(args) == 0: + # TODO: confirm that defaulting untyped List items to string is correct + warnings.warn("Defaulting to string type for untyped List") + return { + "type": "array", + "items": {"type": "string"}, + } if args and inspect.isclass(args[0]) and issubclass(args[0], BaseModel): # If it's a list of Pydantic models, return an array with the model schema as items diff --git a/letta/llm_api/openai.py b/letta/llm_api/openai.py index bb3557568a..c335c6cb8c 100644 --- a/letta/llm_api/openai.py +++ b/letta/llm_api/openai.py @@ -307,15 +307,31 @@ def openai_chat_completions_process_stream( warnings.warn( f"Tool call index out of range ({tool_call_delta.index})\ncurrent tool calls: {accum_message.tool_calls}\ncurrent delta: {tool_call_delta}" ) + # force index 0 + # accum_message.tool_calls[0].id = tool_call_delta.id else: accum_message.tool_calls[tool_call_delta.index].id = tool_call_delta.id if tool_call_delta.function is not None: if tool_call_delta.function.name is not None: # TODO assert that we're not overwriting? # TODO += instead of =? 
- accum_message.tool_calls[tool_call_delta.index].function.name = tool_call_delta.function.name + if tool_call_delta.index not in range(len(accum_message.tool_calls)): + warnings.warn( + f"Tool call index out of range ({tool_call_delta.index})\ncurrent tool calls: {accum_message.tool_calls}\ncurrent delta: {tool_call_delta}" + ) + # force index 0 + # accum_message.tool_calls[0].function.name = tool_call_delta.function.name + else: + accum_message.tool_calls[tool_call_delta.index].function.name = tool_call_delta.function.name if tool_call_delta.function.arguments is not None: - accum_message.tool_calls[tool_call_delta.index].function.arguments += tool_call_delta.function.arguments + if tool_call_delta.index not in range(len(accum_message.tool_calls)): + warnings.warn( + f"Tool call index out of range ({tool_call_delta.index})\ncurrent tool calls: {accum_message.tool_calls}\ncurrent delta: {tool_call_delta}" + ) + # force index 0 + # accum_message.tool_calls[0].function.arguments += tool_call_delta.function.arguments + else: + accum_message.tool_calls[tool_call_delta.index].function.arguments += tool_call_delta.function.arguments if message_delta.function_call is not None: raise NotImplementedError(f"Old function_call style not support with stream=True") diff --git a/letta/local_llm/utils.py b/letta/local_llm/utils.py index b0529c358a..f5d5417401 100644 --- a/letta/local_llm/utils.py +++ b/letta/local_llm/utils.py @@ -122,6 +122,10 @@ def num_tokens_from_functions(functions: List[dict], model: str = "gpt-4"): for o in v["enum"]: function_tokens += 3 function_tokens += len(encoding.encode(o)) + elif field == "items": + function_tokens += 2 + if isinstance(v["items"], dict) and "type" in v["items"]: + function_tokens += len(encoding.encode(v["items"]["type"])) else: warnings.warn(f"num_tokens_from_functions: Unsupported field {field} in function {function}") function_tokens += 11 diff --git a/letta/orm/__init__.py b/letta/orm/__init__.py index f5f0e4780d..79185aa41f 
100644 --- a/letta/orm/__init__.py +++ b/letta/orm/__init__.py @@ -5,6 +5,7 @@ from letta.orm.blocks_agents import BlocksAgents from letta.orm.file import FileMetadata from letta.orm.job import Job +from letta.orm.job_messages import JobMessage from letta.orm.message import Message from letta.orm.organization import Organization from letta.orm.passage import AgentPassage, BasePassage, SourcePassage diff --git a/letta/orm/enums.py b/letta/orm/enums.py index e9f7534905..aa7f800bfa 100644 --- a/letta/orm/enums.py +++ b/letta/orm/enums.py @@ -5,6 +5,12 @@ class ToolType(str, Enum): CUSTOM = "custom" LETTA_CORE = "letta_core" LETTA_MEMORY_CORE = "letta_memory_core" + LETTA_MULTI_AGENT_CORE = "letta_multi_agent_core" + + +class JobType(str, Enum): + JOB = "job" + RUN = "run" class ToolSourceType(str, Enum): diff --git a/letta/orm/job.py b/letta/orm/job.py index d95abe443f..95e67006f1 100644 --- a/letta/orm/job.py +++ b/letta/orm/job.py @@ -1,15 +1,20 @@ from datetime import datetime -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, List, Optional from sqlalchemy import JSON, String from sqlalchemy.orm import Mapped, mapped_column, relationship +from letta.orm.enums import JobType from letta.orm.mixins import UserMixin from letta.orm.sqlalchemy_base import SqlalchemyBase from letta.schemas.enums import JobStatus from letta.schemas.job import Job as PydanticJob +from letta.schemas.letta_request import LettaRequestConfig if TYPE_CHECKING: + from letta.orm.job_messages import JobMessage + from letta.orm.job_usage_statistics import JobUsageStatistics + from letta.orm.message import Message from letta.orm.user import User @@ -23,7 +28,24 @@ class Job(SqlalchemyBase, UserMixin): status: Mapped[JobStatus] = mapped_column(String, default=JobStatus.created, doc="The current status of the job.") completed_at: Mapped[Optional[datetime]] = mapped_column(nullable=True, doc="The unix timestamp of when the job was completed.") - metadata_: 
Mapped[Optional[dict]] = mapped_column(JSON, default=lambda: {}, doc="The metadata of the job.") + metadata_: Mapped[Optional[dict]] = mapped_column(JSON, doc="The metadata of the job.") + job_type: Mapped[JobType] = mapped_column( + String, + default=JobType.JOB, + doc="The type of job. This affects whether or not we generate json_schema and source_code on the fly.", + ) + request_config: Mapped[Optional[LettaRequestConfig]] = mapped_column( + JSON, nullable=True, doc="The request configuration for the job, stored as JSON." + ) # relationships user: Mapped["User"] = relationship("User", back_populates="jobs") + job_messages: Mapped[List["JobMessage"]] = relationship("JobMessage", back_populates="job", cascade="all, delete-orphan") + usage_statistics: Mapped[list["JobUsageStatistics"]] = relationship( + "JobUsageStatistics", back_populates="job", cascade="all, delete-orphan" + ) + + @property + def messages(self) -> List["Message"]: + """Get all messages associated with this job.""" + return [jm.message for jm in self.job_messages] diff --git a/letta/orm/job_messages.py b/letta/orm/job_messages.py new file mode 100644 index 0000000000..063febfc9f --- /dev/null +++ b/letta/orm/job_messages.py @@ -0,0 +1,33 @@ +from typing import TYPE_CHECKING + +from sqlalchemy import ForeignKey, UniqueConstraint +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from letta.orm.sqlalchemy_base import SqlalchemyBase + +if TYPE_CHECKING: + from letta.orm.job import Job + from letta.orm.message import Message + + +class JobMessage(SqlalchemyBase): + """Tracks messages that were created during job execution.""" + + __tablename__ = "job_messages" + __table_args__ = (UniqueConstraint("job_id", "message_id", name="unique_job_message"),) + + id: Mapped[int] = mapped_column(primary_key=True, doc="Unique identifier for the job message") + job_id: Mapped[str] = mapped_column( + ForeignKey("jobs.id", ondelete="CASCADE"), + nullable=False, # A job message must belong to a job + 
doc="ID of the job that created the message", + ) + message_id: Mapped[str] = mapped_column( + ForeignKey("messages.id", ondelete="CASCADE"), + nullable=False, # A job message must have a message + doc="ID of the message created by the job", + ) + + # Relationships + job: Mapped["Job"] = relationship("Job", back_populates="job_messages") + message: Mapped["Message"] = relationship("Message", back_populates="job_message") diff --git a/letta/orm/job_usage_statistics.py b/letta/orm/job_usage_statistics.py new file mode 100644 index 0000000000..0a355d6970 --- /dev/null +++ b/letta/orm/job_usage_statistics.py @@ -0,0 +1,30 @@ +from typing import TYPE_CHECKING, Optional + +from sqlalchemy import ForeignKey +from sqlalchemy.orm import Mapped, mapped_column, relationship + +from letta.orm.sqlalchemy_base import SqlalchemyBase + +if TYPE_CHECKING: + from letta.orm.job import Job + + +class JobUsageStatistics(SqlalchemyBase): + """Tracks usage statistics for jobs, with future support for per-step tracking.""" + + __tablename__ = "job_usage_statistics" + + id: Mapped[int] = mapped_column(primary_key=True, doc="Unique identifier for the usage statistics entry") + job_id: Mapped[str] = mapped_column( + ForeignKey("jobs.id", ondelete="CASCADE"), nullable=False, doc="ID of the job these statistics belong to" + ) + step_id: Mapped[Optional[str]] = mapped_column( + nullable=True, doc="ID of the specific step within the job (for future per-step tracking)" + ) + completion_tokens: Mapped[int] = mapped_column(default=0, doc="Number of tokens generated by the agent") + prompt_tokens: Mapped[int] = mapped_column(default=0, doc="Number of tokens in the prompt") + total_tokens: Mapped[int] = mapped_column(default=0, doc="Total number of tokens processed by the agent") + step_count: Mapped[int] = mapped_column(default=0, doc="Number of steps taken by the agent") + + # Relationship back to the job + job: Mapped["Job"] = relationship("Job", back_populates="usage_statistics") diff --git 
a/letta/orm/message.py b/letta/orm/message.py index a8bbb90017..231462a42f 100644 --- a/letta/orm/message.py +++ b/letta/orm/message.py @@ -28,3 +28,13 @@ class Message(SqlalchemyBase, OrganizationMixin, AgentMixin): # Relationships agent: Mapped["Agent"] = relationship("Agent", back_populates="messages", lazy="selectin") organization: Mapped["Organization"] = relationship("Organization", back_populates="messages", lazy="selectin") + + # Job relationship + job_message: Mapped[Optional["JobMessage"]] = relationship( + "JobMessage", back_populates="message", uselist=False, cascade="all, delete-orphan", single_parent=True + ) + + @property + def job(self) -> Optional["Job"]: + """Get the job associated with this message, if any.""" + return self.job_message.job if self.job_message else None diff --git a/letta/orm/sqlalchemy_base.py b/letta/orm/sqlalchemy_base.py index dc3822760f..05ada679d0 100644 --- a/letta/orm/sqlalchemy_base.py +++ b/letta/orm/sqlalchemy_base.py @@ -1,9 +1,9 @@ from datetime import datetime from enum import Enum from functools import wraps -from typing import TYPE_CHECKING, List, Literal, Optional +from typing import TYPE_CHECKING, List, Literal, Optional, Tuple, Union -from sqlalchemy import String, desc, func, or_, select +from sqlalchemy import String, and_, desc, func, or_, select from sqlalchemy.exc import DBAPIError, IntegrityError, TimeoutError from sqlalchemy.orm import Mapped, Session, mapped_column @@ -61,6 +61,11 @@ def list( ascending: bool = True, tags: Optional[List[str]] = None, match_all_tags: bool = False, + actor: Optional["User"] = None, + access: Optional[List[Literal["read", "write", "admin"]]] = ["read"], + access_type: AccessType = AccessType.ORGANIZATION, + join_model: Optional[Base] = None, + join_conditions: Optional[Union[Tuple, List]] = None, **kwargs, ) -> List["SqlalchemyBase"]: """ @@ -94,6 +99,13 @@ def list( query = select(cls) + if join_model and join_conditions: + query = query.join(join_model, 
and_(*join_conditions)) + + # Apply access predicate if actor is provided + if actor: + query = cls.apply_access_predicate(query, actor, access, access_type) + # Handle tag filtering if the model has tags if tags and hasattr(cls, "tags"): query = select(cls) @@ -118,7 +130,15 @@ def list( # Apply filtering logic from kwargs for key, value in kwargs.items(): - column = getattr(cls, key) + if "." in key: + # Handle joined table columns + table_name, column_name = key.split(".") + joined_table = locals().get(table_name) or globals().get(table_name) + column = getattr(joined_table, column_name) + else: + # Handle columns from main table + column = getattr(cls, key) + if isinstance(value, (list, tuple, set)): query = query.where(column.in_(value)) else: @@ -143,7 +163,11 @@ def list( # Text search if query_text: - query = query.filter(func.lower(cls.text).contains(func.lower(query_text))) + if hasattr(cls, "text"): + query = query.filter(func.lower(cls.text).contains(func.lower(query_text))) + elif hasattr(cls, "name"): + # Special case for Agent model - search across name + query = query.filter(func.lower(cls.name).contains(func.lower(query_text))) # Embedding search (for Passages) is_ordered = False diff --git a/letta/orm/tool.py b/letta/orm/tool.py index 9d744f44de..0b443d27c7 100644 --- a/letta/orm/tool.py +++ b/letta/orm/tool.py @@ -40,9 +40,6 @@ class Tool(SqlalchemyBase, OrganizationMixin): source_type: Mapped[ToolSourceType] = mapped_column(String, doc="The type of the source code.", default=ToolSourceType.json) source_code: Mapped[Optional[str]] = mapped_column(String, doc="The source code of the function.") json_schema: Mapped[Optional[dict]] = mapped_column(JSON, default=lambda: {}, doc="The OAI compatable JSON schema of the function.") - module: Mapped[Optional[str]] = mapped_column( - String, nullable=True, doc="the module path from which this tool was derived in the codebase." 
- ) # relationships organization: Mapped["Organization"] = relationship("Organization", back_populates="tools", lazy="selectin") diff --git a/letta/schemas/agent.py b/letta/schemas/agent.py index 16b051f034..697734bd84 100644 --- a/letta/schemas/agent.py +++ b/letta/schemas/agent.py @@ -95,8 +95,8 @@ class CreateAgent(BaseModel, validate_assignment=True): # name: str = Field(default_factory=lambda: create_random_username(), description="The name of the agent.") # memory creation - memory_blocks: List[CreateBlock] = Field( - ..., + memory_blocks: Optional[List[CreateBlock]] = Field( + None, description="The blocks to create in the agent's in-context memory.", ) # TODO: This is a legacy field and should be removed ASAP to force `tool_ids` usage @@ -115,7 +115,12 @@ class CreateAgent(BaseModel, validate_assignment=True): # initial_message_sequence: Optional[List[MessageCreate]] = Field( None, description="The initial set of messages to put in the agent's in-context memory." ) - include_base_tools: bool = Field(True, description="The LLM configuration used by the agent.") + include_base_tools: bool = Field( + True, description="If true, attaches the Letta core tools (e.g. archival_memory and core_memory related functions)." + ) + include_multi_agent_tools: bool = Field( + False, description="If true, attaches the Letta multi-agent tools (e.g. sending a message to another agent)." 
+ ) description: Optional[str] = Field(None, description="The description of the agent.") metadata_: Optional[Dict] = Field(None, description="The metadata of the agent.", alias="metadata_") llm: Optional[str] = Field( @@ -129,7 +134,8 @@ class CreateAgent(BaseModel, validate_assignment=True): # context_window_limit: Optional[int] = Field(None, description="The context window limit used by the agent.") embedding_chunk_size: Optional[int] = Field(DEFAULT_EMBEDDING_CHUNK_SIZE, description="The embedding chunk size used by the agent.") from_template: Optional[str] = Field(None, description="The template id used to configure the agent") - project_id: Optional[str] = Field(None, description="The project id that the agent will be associated with.") + template: bool = Field(False, description="Whether the agent is a template") + project: Optional[str] = Field(None, description="The project slug that the agent will be associated with.") tool_exec_environment_variables: Optional[Dict[str, str]] = Field( None, description="The environment variables for tool execution specific to this agent." 
) diff --git a/letta/schemas/job.py b/letta/schemas/job.py index 17c2b98dad..c61c58399f 100644 --- a/letta/schemas/job.py +++ b/letta/schemas/job.py @@ -3,6 +3,7 @@ from pydantic import Field +from letta.orm.enums import JobType from letta.schemas.enums import JobStatus from letta.schemas.letta_base import OrmMetadataBase @@ -12,6 +13,7 @@ class JobBase(OrmMetadataBase): status: JobStatus = Field(default=JobStatus.created, description="The status of the job.") completed_at: Optional[datetime] = Field(None, description="The unix timestamp of when the job was completed.") metadata_: Optional[dict] = Field(None, description="The metadata of the job.") + job_type: JobType = Field(default=JobType.JOB, description="The type of the job.") class Job(JobBase): diff --git a/letta/schemas/letta_base.py b/letta/schemas/letta_base.py index dce2b02ded..bb29a5be41 100644 --- a/letta/schemas/letta_base.py +++ b/letta/schemas/letta_base.py @@ -52,8 +52,13 @@ def _generate_id(cls, prefix: Optional[str] = None) -> str: @classmethod def _id_regex_pattern(cls, prefix: str): """generates the regex pattern for a given id""" + if cls.__name__ in ("JobBase", "Job", "Run", "RunBase"): + prefix_pattern = "(job|run)" + else: + prefix_pattern = prefix + return ( - r"^" + prefix + r"-" # prefix string + r"^" + prefix_pattern + r"-" # prefix string r"[a-fA-F0-9]{8}" # 8 hexadecimal characters # r"[a-fA-F0-9]{4}-" # 4 hexadecimal characters # r"[a-fA-F0-9]{4}-" # 4 hexadecimal characters diff --git a/letta/schemas/letta_request.py b/letta/schemas/letta_request.py index f1f8f450df..663dba14a8 100644 --- a/letta/schemas/letta_request.py +++ b/letta/schemas/letta_request.py @@ -6,11 +6,8 @@ from letta.schemas.message import MessageCreate -class LettaRequest(BaseModel): - messages: List[MessageCreate] = Field(..., description="The messages to be sent to the agent.") - +class LettaRequestConfig(BaseModel): # Flags to support the use of AssistantMessage message types - use_assistant_message: bool = 
Field( default=True, description="Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.", @@ -25,6 +22,11 @@ class LettaRequest(BaseModel): ) +class LettaRequest(BaseModel): + messages: List[MessageCreate] = Field(..., description="The messages to be sent to the agent.") + config: LettaRequestConfig = Field(default=LettaRequestConfig(), description="Configuration options for the LettaRequest.") + + class LettaStreamingRequest(LettaRequest): stream_tokens: bool = Field( default=False, diff --git a/letta/schemas/llm_config.py b/letta/schemas/llm_config.py index 0be4f81887..970579ea9e 100644 --- a/letta/schemas/llm_config.py +++ b/letta/schemas/llm_config.py @@ -96,7 +96,7 @@ def default_config(cls, model_name: str): model="memgpt-openai", model_endpoint_type="openai", model_endpoint="https://inference.memgpt.ai", - context_window=16384, + context_window=8192, ) else: raise ValueError(f"Model {model_name} not supported.") diff --git a/letta/schemas/message.py b/letta/schemas/message.py index ea46f3f870..df09aa254e 100644 --- a/letta/schemas/message.py +++ b/letta/schemas/message.py @@ -149,9 +149,9 @@ def to_letta_message( # We need to unpack the actual message contents from the function call try: func_args = json.loads(tool_call.function.arguments) - message_string = func_args[DEFAULT_MESSAGE_TOOL_KWARG] + message_string = func_args[assistant_message_tool_kwarg] except KeyError: - raise ValueError(f"Function call {tool_call.function.name} missing {DEFAULT_MESSAGE_TOOL_KWARG} argument") + raise ValueError(f"Function call {tool_call.function.name} missing {assistant_message_tool_kwarg} argument") messages.append( AssistantMessage( id=self.id, @@ -708,8 +708,6 @@ def to_cohere_dict( }, ] for tc in self.tool_calls: - # TODO better way to pack? 
- # function_call_text = json.dumps(tc.to_dict()) function_name = tc.function["name"] function_args = json.loads(tc.function["arguments"]) function_args_str = ",".join([f"{k}={v}" for k, v in function_args.items()]) diff --git a/letta/schemas/providers.py b/letta/schemas/providers.py index 5ab6cad713..407418a951 100644 --- a/letta/schemas/providers.py +++ b/letta/schemas/providers.py @@ -63,7 +63,7 @@ def list_llm_models(self) -> List[LLMConfig]: model="letta-free", # NOTE: renamed model_endpoint_type="openai", model_endpoint="https://inference.memgpt.ai", - context_window=16384, + context_window=8192, handle=self.get_handle("letta-free"), ) ] diff --git a/letta/schemas/run.py b/letta/schemas/run.py new file mode 100644 index 0000000000..b455a211f6 --- /dev/null +++ b/letta/schemas/run.py @@ -0,0 +1,61 @@ +from typing import Optional + +from pydantic import Field + +from letta.orm.enums import JobType +from letta.schemas.job import Job, JobBase +from letta.schemas.letta_request import LettaRequestConfig + + +class RunBase(JobBase): + """Base class for Run schemas that inherits from JobBase but uses 'run' prefix for IDs""" + + __id_prefix__ = "run" + job_type: JobType = JobType.RUN + + +class Run(RunBase): + """ + Representation of a run, which is a job with a 'run' prefix in its ID. + Inherits all fields and behavior from Job except for the ID prefix. + + Parameters: + id (str): The unique identifier of the run (prefixed with 'run-'). + status (JobStatus): The status of the run. + created_at (datetime): The unix timestamp of when the run was created. + completed_at (datetime): The unix timestamp of when the run was completed. + user_id (str): The unique identifier of the user associated with the run. 
+ """ + + id: str = RunBase.generate_id_field() + user_id: Optional[str] = Field(None, description="The unique identifier of the user associated with the run.") + request_config: Optional[LettaRequestConfig] = Field(None, description="The request configuration for the run.") + + @classmethod + def from_job(cls, job: Job) -> "Run": + """ + Convert a Job instance to a Run instance by replacing the ID prefix. + All other fields are copied as-is. + + Args: + job: The Job instance to convert + + Returns: + A new Run instance with the same data but 'run-' prefix in ID + """ + # Convert job dict to exclude None values + job_data = job.model_dump(exclude_none=True) + + # Create new Run instance with converted data + return cls(**job_data) + + def to_job(self) -> Job: + """ + Convert this Run instance to a Job instance by replacing the ID prefix. + All other fields are copied as-is. + + Returns: + A new Job instance with the same data but 'job-' prefix in ID + """ + run_data = self.model_dump(exclude_none=True) + return Job(**run_data) diff --git a/letta/schemas/tool.py b/letta/schemas/tool.py index 5c38467e06..610685b45b 100644 --- a/letta/schemas/tool.py +++ b/letta/schemas/tool.py @@ -2,13 +2,17 @@ from pydantic import Field, model_validator -from letta.constants import COMPOSIO_TOOL_TAG_NAME, FUNCTION_RETURN_CHAR_LIMIT, LETTA_CORE_TOOL_MODULE_NAME +from letta.constants import ( + COMPOSIO_TOOL_TAG_NAME, + FUNCTION_RETURN_CHAR_LIMIT, + LETTA_CORE_TOOL_MODULE_NAME, + LETTA_MULTI_AGENT_TOOL_MODULE_NAME, +) from letta.functions.functions import derive_openai_json_schema, get_json_schema_from_module from letta.functions.helpers import generate_composio_tool_wrapper, generate_langchain_tool_wrapper from letta.functions.schema_generator import generate_schema_from_args_schema_v2 from letta.orm.enums import ToolType from letta.schemas.letta_base import LettaBase -from letta.schemas.openai.chat_completions import ToolCall class BaseTool(LettaBase): @@ -32,7 +36,6 @@ class 
Tool(BaseTool): tool_type: ToolType = Field(ToolType.CUSTOM, description="The type of the tool.") description: Optional[str] = Field(None, description="The description of the tool.") source_type: Optional[str] = Field(None, description="The type of the source code.") - module: Optional[str] = Field(None, description="The module of the function.") organization_id: Optional[str] = Field(None, description="The unique identifier of the organization associated with the tool.") name: Optional[str] = Field(None, description="The name of the function.") tags: List[str] = Field([], description="Metadata tags.") @@ -66,6 +69,9 @@ def populate_missing_fields(self): elif self.tool_type in {ToolType.LETTA_CORE, ToolType.LETTA_MEMORY_CORE}: # If it's letta core tool, we generate the json_schema on the fly here self.json_schema = get_json_schema_from_module(module_name=LETTA_CORE_TOOL_MODULE_NAME, function_name=self.name) + elif self.tool_type in {ToolType.LETTA_MULTI_AGENT_CORE}: + # If it's letta multi-agent tool, we also generate the json_schema on the fly here + self.json_schema = get_json_schema_from_module(module_name=LETTA_MULTI_AGENT_TOOL_MODULE_NAME, function_name=self.name) # Derive name from the JSON schema if not provided if not self.name: @@ -81,24 +87,11 @@ def populate_missing_fields(self): return self - def to_dict(self): - """ - Convert tool into OpenAI representation. 
- """ - return vars( - ToolCall( - tool_id=self.id, - tool_call_type="function", - function=self.module, - ) - ) - class ToolCreate(LettaBase): name: Optional[str] = Field(None, description="The name of the function (auto-generated from source_code if not provided).") description: Optional[str] = Field(None, description="The description of the tool.") tags: List[str] = Field([], description="Metadata tags.") - module: Optional[str] = Field(None, description="The source code of the function.") source_code: str = Field(..., description="The source code of the function.") source_type: str = Field("python", description="The source type of the function.") json_schema: Optional[Dict] = Field( @@ -212,7 +205,6 @@ class ToolUpdate(LettaBase): description: Optional[str] = Field(None, description="The description of the tool.") name: Optional[str] = Field(None, description="The name of the function.") tags: Optional[List[str]] = Field(None, description="Metadata tags.") - module: Optional[str] = Field(None, description="The source code of the function.") source_code: Optional[str] = Field(None, description="The source code of the function.") source_type: Optional[str] = Field(None, description="The type of the source code.") json_schema: Optional[Dict] = Field( diff --git a/letta/server/rest_api/interface.py b/letta/server/rest_api/interface.py index 339ff38c01..995593aadb 100644 --- a/letta/server/rest_api/interface.py +++ b/letta/server/rest_api/interface.py @@ -281,6 +281,9 @@ def __init__( # turn function argument to send_message into a normal text stream self.streaming_chat_completion_json_reader = FunctionArgumentsStreamHandler(json_key=assistant_message_tool_kwarg) + # Store metadata passed from server + self.metadata = {} + self._chunks = deque() self._event = asyncio.Event() # Use an event to notify when chunks are available self._active = True # This should be set to False to stop the generator diff --git 
a/letta/server/rest_api/routers/openai/chat_completions/chat_completions.py b/letta/server/rest_api/routers/openai/chat_completions/chat_completions.py index 4809fa19df..f1485da3c7 100644 --- a/letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +++ b/letta/server/rest_api/routers/openai/chat_completions/chat_completions.py @@ -3,13 +3,11 @@ from fastapi import APIRouter, Body, Depends, Header, HTTPException -from letta.schemas.enums import MessageRole from letta.schemas.letta_message import LettaMessage, ToolCall from letta.schemas.openai.chat_completion_request import ChatCompletionRequest from letta.schemas.openai.chat_completion_response import ChatCompletionResponse, Choice, Message, UsageStatistics # TODO this belongs in a controller! -from letta.server.rest_api.routers.v1.agents import send_message_to_agent from letta.server.rest_api.utils import get_letta_server if TYPE_CHECKING: @@ -52,12 +50,10 @@ async def create_chat_completion( # TODO(charles) support multimodal parts assert isinstance(input_message.content, str) - return await send_message_to_agent( - server=server, + return await server.send_message_to_agent( agent_id=agent_id, - user_id=actor.id, - role=MessageRole(input_message.role), - message=input_message.content, + actor=actor, + message=input_message.content, # TODO: This is broken # Turn streaming ON stream_steps=True, stream_tokens=True, @@ -71,12 +67,10 @@ async def create_chat_completion( # TODO(charles) support multimodal parts assert isinstance(input_message.content, str) - response_messages = await send_message_to_agent( - server=server, + response_messages = await server.send_message_to_agent( agent_id=agent_id, - user_id=actor.id, - role=MessageRole(input_message.role), - message=input_message.content, + actor=actor, + message=input_message.content, # TODO: This is broken # Turn streaming OFF stream_steps=False, stream_tokens=False, diff --git a/letta/server/rest_api/routers/v1/__init__.py 
b/letta/server/rest_api/routers/v1/__init__.py index 6fa75fb9f8..5611c05541 100644 --- a/letta/server/rest_api/routers/v1/__init__.py +++ b/letta/server/rest_api/routers/v1/__init__.py @@ -4,8 +4,10 @@ from letta.server.rest_api.routers.v1.jobs import router as jobs_router from letta.server.rest_api.routers.v1.llms import router as llm_router from letta.server.rest_api.routers.v1.providers import router as providers_router +from letta.server.rest_api.routers.v1.runs import router as runs_router from letta.server.rest_api.routers.v1.sandbox_configs import router as sandbox_configs_router from letta.server.rest_api.routers.v1.sources import router as sources_router +from letta.server.rest_api.routers.v1.tags import router as tags_router from letta.server.rest_api.routers.v1.tools import router as tools_router ROUTERS = [ @@ -18,4 +20,6 @@ health_router, sandbox_configs_router, providers_router, + runs_router, + tags_router, ] diff --git a/letta/server/rest_api/routers/v1/agents.py b/letta/server/rest_api/routers/v1/agents.py index 5ab1934803..53b0c2904f 100644 --- a/letta/server/rest_api/routers/v1/agents.py +++ b/letta/server/rest_api/routers/v1/agents.py @@ -1,10 +1,8 @@ -import asyncio -import warnings from datetime import datetime from typing import List, Optional, Union from fastapi import APIRouter, BackgroundTasks, Body, Depends, Header, HTTPException, Query, status -from fastapi.responses import JSONResponse, StreamingResponse +from fastapi.responses import JSONResponse from pydantic import Field from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG @@ -12,19 +10,18 @@ from letta.orm.errors import NoResultFound from letta.schemas.agent import AgentState, CreateAgent, UpdateAgent from letta.schemas.block import Block, BlockUpdate, CreateBlock # , BlockLabelUpdate, BlockLimitUpdate -from letta.schemas.enums import MessageStreamStatus -from letta.schemas.job import Job, JobStatus, JobUpdate -from letta.schemas.letta_message import 
LegacyLettaMessage, LettaMessage, LettaMessageUnion +from letta.schemas.job import JobStatus, JobUpdate +from letta.schemas.letta_message import LettaMessageUnion from letta.schemas.letta_request import LettaRequest, LettaStreamingRequest from letta.schemas.letta_response import LettaResponse from letta.schemas.memory import ArchivalMemorySummary, ContextWindowOverview, CreateArchivalMemory, Memory, RecallMemorySummary -from letta.schemas.message import Message, MessageCreate, MessageUpdate +from letta.schemas.message import Message, MessageUpdate from letta.schemas.passage import Passage +from letta.schemas.run import Run from letta.schemas.source import Source from letta.schemas.tool import Tool from letta.schemas.user import User -from letta.server.rest_api.interface import StreamingServerInterface -from letta.server.rest_api.utils import get_letta_server, sse_async_generator +from letta.server.rest_api.utils import get_letta_server from letta.server.server import SyncServer # These can be forward refs, but because Fastapi needs them at runtime the must be imported normally @@ -46,9 +43,9 @@ def list_agents( ), server: "SyncServer" = Depends(get_letta_server), user_id: Optional[str] = Header(None, alias="user_id"), - cursor: Optional[int] = Query(None, description="Cursor for pagination"), + cursor: Optional[str] = Query(None, description="Cursor for pagination"), limit: Optional[int] = Query(None, description="Limit for pagination"), - # Extract user_id from header, default to None if not present + query_text: Optional[str] = Query(None, description="Search agents by name"), ): """ List all agents associated with a given user. 
@@ -63,6 +60,7 @@ def list_agents( "tags": tags, "match_all_tags": match_all_tags, "name": name, + "query_text": query_text, }.items() if value is not None } @@ -155,6 +153,18 @@ def remove_tool_from_agent( return server.agent_manager.detach_tool(agent_id=agent_id, tool_id=tool_id, actor=actor) +@router.patch("/{agent_id}/reset-messages", response_model=AgentState, operation_id="reset_messages") +def reset_messages( + agent_id: str, + add_default_initial_messages: bool = Query(default=False, description="If true, adds the default initial messages after resetting."), + server: "SyncServer" = Depends(get_letta_server), + user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present +): + """Resets the messages for an agent""" + actor = server.user_manager.get_user_or_default(user_id=user_id) + return server.agent_manager.reset_messages(agent_id=agent_id, actor=actor, add_default_initial_messages=add_default_initial_messages) + + @router.get("/{agent_id}", response_model=AgentState, operation_id="get_agent") def get_agent_state( agent_id: str, @@ -485,17 +495,16 @@ async def send_message( This endpoint accepts a message from a user and processes it through the agent. 
""" actor = server.user_manager.get_user_or_default(user_id=user_id) - result = await send_message_to_agent( - server=server, + result = await server.send_message_to_agent( agent_id=agent_id, actor=actor, messages=request.messages, stream_steps=False, stream_tokens=False, # Support for AssistantMessage - use_assistant_message=request.use_assistant_message, - assistant_message_tool_name=request.assistant_message_tool_name, - assistant_message_tool_kwarg=request.assistant_message_tool_kwarg, + use_assistant_message=request.config.use_assistant_message, + assistant_message_tool_name=request.config.assistant_message_tool_name, + assistant_message_tool_kwarg=request.config.assistant_message_tool_kwarg, ) return result @@ -526,16 +535,16 @@ async def send_message_streaming( """ actor = server.user_manager.get_user_or_default(user_id=user_id) - result = await send_message_to_agent( - server=server, + result = await server.send_message_to_agent( agent_id=agent_id, actor=actor, messages=request.messages, stream_steps=True, stream_tokens=request.stream_tokens, # Support for AssistantMessage - assistant_message_tool_name=request.assistant_message_tool_name, - assistant_message_tool_kwarg=request.assistant_message_tool_kwarg, + use_assistant_message=request.config.use_assistant_message, + assistant_message_tool_name=request.config.assistant_message_tool_name, + assistant_message_tool_kwarg=request.config.assistant_message_tool_kwarg, ) return result @@ -546,21 +555,23 @@ async def process_message_background( actor: User, agent_id: str, messages: list, + use_assistant_message: bool, assistant_message_tool_name: str, assistant_message_tool_kwarg: str, ) -> None: """Background task to process the message and update job status.""" try: # TODO(matt) we should probably make this stream_steps and log each step as it progresses, so the job update GET can see the total steps so far + partial usage? 
- result = await send_message_to_agent( - server=server, + result = await server.send_message_to_agent( agent_id=agent_id, actor=actor, messages=messages, stream_steps=False, # NOTE(matt) stream_tokens=False, + use_assistant_message=use_assistant_message, assistant_message_tool_name=assistant_message_tool_name, assistant_message_tool_kwarg=assistant_message_tool_kwarg, + metadata={"job_id": job_id}, # Pass job_id through metadata ) # Update job status to completed @@ -571,6 +582,9 @@ async def process_message_background( ) server.job_manager.update_job_by_id(job_id=job_id, job_update=job_update, actor=actor) + # Add job usage statistics + server.job_manager.add_job_usage(job_id=job_id, usage=result.usage, actor=actor) + except Exception as e: # Update job status to failed job_update = JobUpdate( @@ -584,7 +598,7 @@ async def process_message_background( @router.post( "/{agent_id}/messages/async", - response_model=Job, + response_model=Run, operation_id="create_agent_message_async", ) async def send_message_async( @@ -595,152 +609,34 @@ async def send_message_async( user_id: Optional[str] = Header(None, alias="user_id"), ): """ - Asynchronously process a user message and return a job ID. - The actual processing happens in the background, and the status can be checked using the job ID. + Asynchronously process a user message and return a run object. + The actual processing happens in the background, and the status can be checked using the run ID. 
""" actor = server.user_manager.get_user_or_default(user_id=user_id) # Create a new job - job = Job( + run = Run( user_id=actor.id, status=JobStatus.created, metadata_={ "job_type": "send_message_async", "agent_id": agent_id, }, + request_config=request.config, ) - job = server.job_manager.create_job(pydantic_job=job, actor=actor) + run = server.job_manager.create_job(pydantic_job=run, actor=actor) # Add the background task background_tasks.add_task( process_message_background, - job_id=job.id, + job_id=run.id, server=server, actor=actor, agent_id=agent_id, messages=request.messages, - assistant_message_tool_name=request.assistant_message_tool_name, - assistant_message_tool_kwarg=request.assistant_message_tool_kwarg, + use_assistant_message=request.config.use_assistant_message, + assistant_message_tool_name=request.config.assistant_message_tool_name, + assistant_message_tool_kwarg=request.config.assistant_message_tool_kwarg, ) - return job - - -# TODO: move this into server.py? -async def send_message_to_agent( - server: SyncServer, - agent_id: str, - actor: User, - # role: MessageRole, - messages: Union[List[Message], List[MessageCreate]], - stream_steps: bool, - stream_tokens: bool, - # related to whether or not we return `LettaMessage`s or `Message`s - chat_completion_mode: bool = False, - timestamp: Optional[datetime] = None, - # Support for AssistantMessage - use_assistant_message: bool = True, - assistant_message_tool_name: str = DEFAULT_MESSAGE_TOOL, - assistant_message_tool_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG, -) -> Union[StreamingResponse, LettaResponse]: - """Split off into a separate function so that it can be imported in the /chat/completion proxy.""" - - # TODO: @charles is this the correct way to handle? 
- include_final_message = True - - if not stream_steps and stream_tokens: - raise HTTPException(status_code=400, detail="stream_steps must be 'true' if stream_tokens is 'true'") - - # For streaming response - try: - - # TODO: move this logic into server.py - - # Get the generator object off of the agent's streaming interface - # This will be attached to the POST SSE request used under-the-hood - letta_agent = server.load_agent(agent_id=agent_id, actor=actor) - - # Disable token streaming if not OpenAI - # TODO: cleanup this logic - llm_config = letta_agent.agent_state.llm_config - if stream_tokens and (llm_config.model_endpoint_type != "openai" or "inference.memgpt.ai" in llm_config.model_endpoint): - warnings.warn( - "Token streaming is only supported for models with type 'openai' or `inference.memgpt.ai` in the model_endpoint: agent has endpoint type {llm_config.model_endpoint_type} and {llm_config.model_endpoint}. Setting stream_tokens to False." - ) - stream_tokens = False - - # Create a new interface per request - letta_agent.interface = StreamingServerInterface(use_assistant_message) - streaming_interface = letta_agent.interface - if not isinstance(streaming_interface, StreamingServerInterface): - raise ValueError(f"Agent has wrong type of interface: {type(streaming_interface)}") - - # Enable token-streaming within the request if desired - streaming_interface.streaming_mode = stream_tokens - # "chatcompletion mode" does some remapping and ignores inner thoughts - streaming_interface.streaming_chat_completion_mode = chat_completion_mode - - # streaming_interface.allow_assistant_message = stream - # streaming_interface.function_call_legacy_mode = stream - - # Allow AssistantMessage is desired by client - streaming_interface.assistant_message_tool_name = assistant_message_tool_name - streaming_interface.assistant_message_tool_kwarg = assistant_message_tool_kwarg - - # Related to JSON buffer reader - streaming_interface.inner_thoughts_in_kwargs = ( - 
llm_config.put_inner_thoughts_in_kwargs if llm_config.put_inner_thoughts_in_kwargs is not None else False - ) - - # Offload the synchronous message_func to a separate thread - streaming_interface.stream_start() - task = asyncio.create_task( - asyncio.to_thread( - server.send_messages, - actor=actor, - agent_id=agent_id, - messages=messages, - interface=streaming_interface, - ) - ) - - if stream_steps: - # return a stream - return StreamingResponse( - sse_async_generator( - streaming_interface.get_generator(), - usage_task=task, - finish_message=include_final_message, - ), - media_type="text/event-stream", - ) - - else: - # buffer the stream, then return the list - generated_stream = [] - async for message in streaming_interface.get_generator(): - assert ( - isinstance(message, LettaMessage) or isinstance(message, LegacyLettaMessage) or isinstance(message, MessageStreamStatus) - ), type(message) - generated_stream.append(message) - if message == MessageStreamStatus.done: - break - - # Get rid of the stream status messages - filtered_stream = [d for d in generated_stream if not isinstance(d, MessageStreamStatus)] - usage = await task - - # By default the stream will be messages of type LettaMessage or LettaLegacyMessage - # If we want to convert these to Message, we can use the attached IDs - # NOTE: we will need to de-duplicate the Messsage IDs though (since Assistant->Inner+Func_Call) - # TODO: eventually update the interface to use `Message` and `MessageChunk` (new) inside the deque instead - return LettaResponse(messages=filtered_stream, usage=usage) - - except HTTPException: - raise - except Exception as e: - print(e) - import traceback - - traceback.print_exc() - raise HTTPException(status_code=500, detail=f"{e}") + return run diff --git a/letta/server/rest_api/routers/v1/runs.py b/letta/server/rest_api/routers/v1/runs.py new file mode 100644 index 0000000000..34cbb889b7 --- /dev/null +++ b/letta/server/rest_api/routers/v1/runs.py @@ -0,0 +1,137 @@ +from typing 
import List, Optional + +from fastapi import APIRouter, Depends, Header, HTTPException, Query + +from letta.orm.enums import JobType +from letta.orm.errors import NoResultFound +from letta.schemas.enums import JobStatus, MessageRole +from letta.schemas.letta_message import LettaMessageUnion +from letta.schemas.openai.chat_completion_response import UsageStatistics +from letta.schemas.run import Run +from letta.server.rest_api.utils import get_letta_server +from letta.server.server import SyncServer + +router = APIRouter(prefix="/runs", tags=["runs"]) + + +@router.get("/", response_model=List[Run], operation_id="list_runs") +def list_runs( + server: "SyncServer" = Depends(get_letta_server), + user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present +): + """ + List all runs. + """ + actor = server.user_manager.get_user_or_default(user_id=user_id) + + return [Run.from_job(job) for job in server.job_manager.list_jobs(actor=actor, job_type=JobType.RUN)] + + +@router.get("/active", response_model=List[Run], operation_id="list_active_runs") +def list_active_runs( + server: "SyncServer" = Depends(get_letta_server), + user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present +): + """ + List all active runs. + """ + actor = server.user_manager.get_user_or_default(user_id=user_id) + + active_runs = server.job_manager.list_jobs(actor=actor, statuses=[JobStatus.created, JobStatus.running], job_type=JobType.RUN) + + return [Run.from_job(job) for job in active_runs] + + +@router.get("/{run_id}", response_model=Run, operation_id="get_run") +def get_run( + run_id: str, + user_id: Optional[str] = Header(None, alias="user_id"), + server: "SyncServer" = Depends(get_letta_server), +): + """ + Get the status of a run. 
+ """ + actor = server.user_manager.get_user_or_default(user_id=user_id) + + try: + job = server.job_manager.get_job_by_id(job_id=run_id, actor=actor) + return Run.from_job(job) + except NoResultFound: + raise HTTPException(status_code=404, detail="Run not found") + + +@router.get("/{run_id}/messages", response_model=List[LettaMessageUnion], operation_id="get_run_messages") +async def get_run_messages( + run_id: str, + server: "SyncServer" = Depends(get_letta_server), + user_id: Optional[str] = Header(None, alias="user_id"), + cursor: Optional[str] = Query(None, description="Cursor for pagination"), + limit: Optional[int] = Query(100, description="Maximum number of messages to return"), + ascending: bool = Query(True, description="Sort order by creation time"), + role: Optional[MessageRole] = Query(None, description="Filter by role"), +): + """ + Get messages associated with a run with filtering options. + + Args: + run_id: ID of the run + cursor: Cursor for pagination + limit: Maximum number of messages to return + ascending: Sort order by creation time + role: Filter by role (user/assistant/system/tool) + return_message_object: Whether to return Message objects or LettaMessage objects + user_id: ID of the user making the request + + Returns: + A list of messages associated with the run. Default is List[LettaMessage]. + """ + actor = server.user_manager.get_user_or_default(user_id=user_id) + + try: + messages = server.job_manager.get_run_messages_cursor( + run_id=run_id, + actor=actor, + limit=limit, + cursor=cursor, + ascending=ascending, + role=role, + ) + return messages + except NoResultFound as e: + raise HTTPException(status_code=404, detail=str(e)) + + +@router.get("/{run_id}/usage", response_model=UsageStatistics, operation_id="get_run_usage") +def get_run_usage( + run_id: str, + user_id: Optional[str] = Header(None, alias="user_id"), + server: "SyncServer" = Depends(get_letta_server), +): + """ + Get usage statistics for a run. 
+ """ + actor = server.user_manager.get_user_or_default(user_id=user_id) + + try: + usage = server.job_manager.get_job_usage(job_id=run_id, actor=actor) + return usage + except NoResultFound: + raise HTTPException(status_code=404, detail=f"Run '{run_id}' not found") + + +@router.delete("/{run_id}", response_model=Run, operation_id="delete_run") +def delete_run( + run_id: str, + user_id: Optional[str] = Header(None, alias="user_id"), + server: "SyncServer" = Depends(get_letta_server), +): + """ + Delete a run by its run_id. + """ + actor = server.user_manager.get_user_or_default(user_id=user_id) + + try: + job = server.job_manager.delete_job_by_id(job_id=run_id, actor=actor) + return Run.from_job(job) + except NoResultFound: + raise HTTPException(status_code=404, detail="Run not found") diff --git a/letta/server/rest_api/routers/v1/tags.py b/letta/server/rest_api/routers/v1/tags.py new file mode 100644 index 0000000000..b052ef8223 --- /dev/null +++ b/letta/server/rest_api/routers/v1/tags.py @@ -0,0 +1,27 @@ +from typing import TYPE_CHECKING, List, Optional + +from fastapi import APIRouter, Depends, Header, Query + +from letta.server.rest_api.utils import get_letta_server + +if TYPE_CHECKING: + from letta.server.server import SyncServer + + +router = APIRouter(prefix="/tags", tags=["tag", "admin"]) + + +@router.get("/", tags=["admin"], response_model=List[str], operation_id="list_tags") +def get_tags( + cursor: Optional[str] = Query(None), + limit: Optional[int] = Query(50), + server: "SyncServer" = Depends(get_letta_server), + query_text: Optional[str] = Query(None), + user_id: Optional[str] = Header(None, alias="user_id"), +): + """ + Get a list of all tags in the database + """ + actor = server.user_manager.get_user_or_default(user_id=user_id) + tags = server.agent_manager.list_tags(actor=actor, cursor=cursor, limit=limit, query_text=query_text) + return tags diff --git a/letta/server/rest_api/utils.py b/letta/server/rest_api/utils.py index bb5dc03407..d355d9e2d9 
100644 --- a/letta/server/rest_api/utils.py +++ b/letta/server/rest_api/utils.py @@ -3,7 +3,7 @@ import os import warnings from enum import Enum -from typing import AsyncGenerator, Optional, Union +from typing import TYPE_CHECKING, AsyncGenerator, Optional, Union from fastapi import Header from pydantic import BaseModel @@ -11,7 +11,9 @@ from letta.errors import ContextWindowExceededError, RateLimitExceededError from letta.schemas.usage import LettaUsageStatistics from letta.server.rest_api.interface import StreamingServerInterface -from letta.server.server import SyncServer + +if TYPE_CHECKING: + from letta.server.server import SyncServer # from letta.orm.user import User # from letta.orm.utilities import get_db_session @@ -86,7 +88,7 @@ async def sse_async_generator( # TODO: why does this double up the interface? -def get_letta_server() -> SyncServer: +def get_letta_server() -> "SyncServer": # Check if a global server is already instantiated from letta.server.rest_api.app import server diff --git a/letta/server/server.py b/letta/server/server.py index 4e2cc3a17c..28d62ec8a8 100644 --- a/letta/server/server.py +++ b/letta/server/server.py @@ -1,4 +1,5 @@ # inspecting tools +import asyncio import os import traceback import warnings @@ -9,6 +10,7 @@ from composio.client import Composio from composio.client.collections import ActionModel, AppModel from fastapi import HTTPException +from fastapi.responses import StreamingResponse import letta.constants as constants import letta.server.utils as server_utils @@ -30,10 +32,11 @@ from letta.schemas.embedding_config import EmbeddingConfig # openai schemas -from letta.schemas.enums import JobStatus +from letta.schemas.enums import JobStatus, MessageStreamStatus from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate from letta.schemas.job import Job, JobUpdate -from letta.schemas.letta_message import LettaMessage, ToolReturnMessage +from letta.schemas.letta_message import LegacyLettaMessage, 
LettaMessage, ToolReturnMessage +from letta.schemas.letta_response import LettaResponse from letta.schemas.llm_config import LLMConfig from letta.schemas.memory import ArchivalMemorySummary, ContextWindowOverview, Memory, RecallMemorySummary from letta.schemas.message import Message, MessageCreate, MessageRole, MessageUpdate @@ -57,6 +60,8 @@ from letta.schemas.tool import Tool from letta.schemas.usage import LettaUsageStatistics from letta.schemas.user import User +from letta.server.rest_api.interface import StreamingServerInterface +from letta.server.rest_api.utils import sse_async_generator from letta.services.agent_manager import AgentManager from letta.services.block_manager import BlockManager from letta.services.job_manager import JobManager @@ -425,12 +430,17 @@ def _step( token_streaming = letta_agent.interface.streaming_mode if hasattr(letta_agent.interface, "streaming_mode") else False logger.debug(f"Starting agent step") + if interface: + metadata = interface.metadata if hasattr(interface, "metadata") else None + else: + metadata = None usage_stats = letta_agent.step( messages=input_messages, chaining=self.chaining, max_chaining_steps=self.max_chaining_steps, stream=token_streaming, skip_verify=True, + metadata=metadata, ) except Exception as e: @@ -687,6 +697,7 @@ def send_messages( wrap_user_message: bool = True, wrap_system_message: bool = True, interface: Union[AgentInterface, None] = None, # needed to getting responses + metadata: Optional[dict] = None, # Pass through metadata to interface ) -> LettaUsageStatistics: """Send a list of messages to the agent @@ -732,6 +743,10 @@ def send_messages( else: raise ValueError(f"All messages must be of type Message or MessageCreate, got {[type(message) for message in messages]}") + # Store metadata in interface if provided + if metadata and hasattr(interface, "metadata"): + interface.metadata = metadata + # Run the agent state forward return self._step(actor=actor, agent_id=agent_id, 
input_messages=message_objects, interface=interface) @@ -1183,3 +1198,125 @@ def get_composio_apps(self, api_key: Optional[str] = None) -> List["AppModel"]: def get_composio_actions_from_app_name(self, composio_app_name: str, api_key: Optional[str] = None) -> List["ActionModel"]: actions = self.get_composio_client(api_key=api_key).actions.get(apps=[composio_app_name]) return actions + + async def send_message_to_agent( + self, + agent_id: str, + actor: User, + # role: MessageRole, + messages: Union[List[Message], List[MessageCreate]], + stream_steps: bool, + stream_tokens: bool, + # related to whether or not we return `LettaMessage`s or `Message`s + chat_completion_mode: bool = False, + timestamp: Optional[datetime] = None, + # Support for AssistantMessage + use_assistant_message: bool = True, + assistant_message_tool_name: str = constants.DEFAULT_MESSAGE_TOOL, + assistant_message_tool_kwarg: str = constants.DEFAULT_MESSAGE_TOOL_KWARG, + metadata: Optional[dict] = None, + ) -> Union[StreamingResponse, LettaResponse]: + """Split off into a separate function so that it can be imported in the /chat/completion proxy.""" + + # TODO: @charles is this the correct way to handle? 
+ include_final_message = True + + if not stream_steps and stream_tokens: + raise HTTPException(status_code=400, detail="stream_steps must be 'true' if stream_tokens is 'true'") + + # For streaming response + try: + + # TODO: move this logic into server.py + + # Get the generator object off of the agent's streaming interface + # This will be attached to the POST SSE request used under-the-hood + letta_agent = self.load_agent(agent_id=agent_id, actor=actor) + + # Disable token streaming if not OpenAI + # TODO: cleanup this logic + llm_config = letta_agent.agent_state.llm_config + if stream_tokens and (llm_config.model_endpoint_type != "openai" or "inference.memgpt.ai" in llm_config.model_endpoint): + warnings.warn( + f"Token streaming is only supported for models with type 'openai' or `inference.memgpt.ai` in the model_endpoint: agent has endpoint type {llm_config.model_endpoint_type} and {llm_config.model_endpoint}. Setting stream_tokens to False." + ) + stream_tokens = False + + # Create a new interface per request + letta_agent.interface = StreamingServerInterface(use_assistant_message) + streaming_interface = letta_agent.interface + if not isinstance(streaming_interface, StreamingServerInterface): + raise ValueError(f"Agent has wrong type of interface: {type(streaming_interface)}") + + # Enable token-streaming within the request if desired + streaming_interface.streaming_mode = stream_tokens + # "chatcompletion mode" does some remapping and ignores inner thoughts + streaming_interface.streaming_chat_completion_mode = chat_completion_mode + + # streaming_interface.allow_assistant_message = stream + # streaming_interface.function_call_legacy_mode = stream + + # Allow AssistantMessage is desired by client + streaming_interface.assistant_message_tool_name = assistant_message_tool_name + streaming_interface.assistant_message_tool_kwarg = assistant_message_tool_kwarg + + # Related to JSON buffer reader + streaming_interface.inner_thoughts_in_kwargs = ( + 
llm_config.put_inner_thoughts_in_kwargs if llm_config.put_inner_thoughts_in_kwargs is not None else False + ) + + # Offload the synchronous message_func to a separate thread + streaming_interface.stream_start() + task = asyncio.create_task( + asyncio.to_thread( + self.send_messages, + actor=actor, + agent_id=agent_id, + messages=messages, + interface=streaming_interface, + metadata=metadata, + ) + ) + + if stream_steps: + # return a stream + return StreamingResponse( + sse_async_generator( + streaming_interface.get_generator(), + usage_task=task, + finish_message=include_final_message, + ), + media_type="text/event-stream", + ) + + else: + # buffer the stream, then return the list + generated_stream = [] + async for message in streaming_interface.get_generator(): + assert ( + isinstance(message, LettaMessage) + or isinstance(message, LegacyLettaMessage) + or isinstance(message, MessageStreamStatus) + ), type(message) + generated_stream.append(message) + if message == MessageStreamStatus.done: + break + + # Get rid of the stream status messages + filtered_stream = [d for d in generated_stream if not isinstance(d, MessageStreamStatus)] + usage = await task + + # By default the stream will be messages of type LettaMessage or LettaLegacyMessage + # If we want to convert these to Message, we can use the attached IDs + # NOTE: we will need to de-duplicate the Message IDs though (since Assistant->Inner+Func_Call) + # TODO: eventually update the interface to use `Message` and `MessageChunk` (new) inside the deque instead + return LettaResponse(messages=filtered_stream, usage=usage) + + except HTTPException: + raise + except Exception as e: + print(e) + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail=f"{e}") diff --git a/letta/services/agent_manager.py b/letta/services/agent_manager.py index 9ec76b233f..5088c7c7bb 100644 --- a/letta/services/agent_manager.py +++ b/letta/services/agent_manager.py @@ -4,11 +4,11 @@ import numpy as np
from sqlalchemy import Select, func, literal, select, union_all -from letta.constants import BASE_MEMORY_TOOLS, BASE_TOOLS, MAX_EMBEDDING_DIM +from letta.constants import BASE_MEMORY_TOOLS, BASE_TOOLS, MAX_EMBEDDING_DIM, MULTI_AGENT_TOOLS from letta.embeddings import embedding_model from letta.log import get_logger from letta.orm import Agent as AgentModel -from letta.orm import AgentPassage +from letta.orm import AgentPassage, AgentsTags from letta.orm import Block as BlockModel from letta.orm import Source as SourceModel from letta.orm import SourcePassage, SourcesAgents @@ -22,6 +22,7 @@ from letta.schemas.embedding_config import EmbeddingConfig from letta.schemas.llm_config import LLMConfig from letta.schemas.message import Message as PydanticMessage +from letta.schemas.message import MessageCreate from letta.schemas.passage import Passage as PydanticPassage from letta.schemas.source import Source as PydanticSource from letta.schemas.tool_rule import ToolRule as PydanticToolRule @@ -87,6 +88,8 @@ def create_agent( tool_names = [] if agent_create.include_base_tools: tool_names.extend(BASE_TOOLS + BASE_MEMORY_TOOLS) + if agent_create.include_multi_agent_tools: + tool_names.extend(MULTI_AGENT_TOOLS) if agent_create.tools: tool_names.extend(agent_create.tools) # Remove duplicates @@ -125,13 +128,17 @@ def create_agent( actor=actor, ) - # TODO: See if we can merge this into the above SQL create call for performance reasons - # Generate a sequence of initial messages to put in the buffer + return self.append_initial_message_sequence_to_in_context_messages(actor, agent_state, agent_create.initial_message_sequence) + + @enforce_types + def append_initial_message_sequence_to_in_context_messages( + self, actor: PydanticUser, agent_state: PydanticAgentState, initial_message_sequence: Optional[List[MessageCreate]] = None + ) -> PydanticAgentState: init_messages = initialize_message_sequence( agent_state=agent_state, memory_edit_timestamp=get_utc_time(), 
include_initial_boot_message=True ) - if agent_create.initial_message_sequence is not None: + if initial_message_sequence is not None: # We always need the system prompt up front system_message_obj = PydanticMessage.dict_to_message( agent_id=agent_state.id, @@ -142,7 +149,7 @@ def create_agent( # Don't use anything else in the pregen sequence, instead use the provided sequence init_messages = [system_message_obj] init_messages.extend( - package_initial_message_sequence(agent_state.id, agent_create.initial_message_sequence, agent_state.llm_config.model, actor) + package_initial_message_sequence(agent_state.id, initial_message_sequence, agent_state.llm_config.model, actor) ) else: init_messages = [ @@ -263,6 +270,7 @@ def list_agents( match_all_tags: bool = False, cursor: Optional[str] = None, limit: Optional[int] = 50, + query_text: Optional[str] = None, **kwargs, ) -> List[PydanticAgentState]: """ @@ -276,6 +284,7 @@ def list_agents( cursor=cursor, limit=limit, organization_id=actor.organization_id if actor else None, + query_text=query_text, **kwargs, ) @@ -468,6 +477,55 @@ def append_to_in_context_messages(self, messages: List[PydanticMessage], agent_i message_ids += [m.id for m in messages] return self.set_in_context_messages(agent_id=agent_id, message_ids=message_ids, actor=actor) + @enforce_types + def reset_messages(self, agent_id: str, actor: PydanticUser, add_default_initial_messages: bool = False) -> PydanticAgentState: + """ + Removes all in-context messages for the specified agent by: + 1) Clearing the agent.messages relationship (which cascades delete-orphans). + 2) Resetting the message_ids list to empty. + 3) Committing the transaction. + + This action is destructive and cannot be undone once committed. + + Args: + add_default_initial_messages: If true, adds the default initial messages after resetting. + agent_id (str): The ID of the agent whose messages will be reset. + actor (PydanticUser): The user performing this action. 
+ + Returns: + PydanticAgentState: The updated agent state with no linked messages. + """ + with self.session_maker() as session: + # Retrieve the existing agent (will raise NoResultFound if invalid) + agent = AgentModel.read(db_session=session, identifier=agent_id, actor=actor) + + # Because of cascade="all, delete-orphan" on agent.messages, setting + # this relationship to an empty list will physically remove them from the DB. + agent.messages = [] + + # Also clear out the message_ids field to keep in-context memory consistent + agent.message_ids = [] + + # Commit the update + agent.update(db_session=session, actor=actor) + + agent_state = agent.to_pydantic() + + if add_default_initial_messages: + return self.append_initial_message_sequence_to_in_context_messages(actor, agent_state) + else: + # We still want to always have a system message + init_messages = initialize_message_sequence( + agent_state=agent_state, memory_edit_timestamp=get_utc_time(), include_initial_boot_message=True + ) + system_message = PydanticMessage.dict_to_message( + agent_id=agent_state.id, + user_id=agent_state.created_by_id, + model=agent_state.llm_config.model, + openai_message_dict=init_messages[0], + ) + return self.append_to_in_context_messages([system_message], agent_id=agent_state.id, actor=actor) + # ====================================================================================================================== # Source Management # ====================================================================================================================== @@ -945,3 +1003,40 @@ def detach_tool(self, agent_id: str, tool_id: str, actor: PydanticUser) -> Pydan # Commit and refresh the agent agent.update(session, actor=actor) return agent.to_pydantic() + + # ====================================================================================================================== + # Tag Management + # 
====================================================================================================================== + @enforce_types + def list_tags( + self, actor: PydanticUser, cursor: Optional[str] = None, limit: Optional[int] = 50, query_text: Optional[str] = None + ) -> List[str]: + """ + Get all tags a user has created, ordered alphabetically. + + Args: + actor: User performing the action. + cursor: Cursor for pagination. + limit: Maximum number of tags to return. + query_text: Query text to filter tags by. + + Returns: + List[str]: List of all tags. + """ + with self.session_maker() as session: + query = ( + session.query(AgentsTags.tag) + .join(AgentModel, AgentModel.id == AgentsTags.agent_id) + .filter(AgentModel.organization_id == actor.organization_id) + .distinct() + ) + + if query_text: + query = query.filter(AgentsTags.tag.ilike(f"%{query_text}%")) + + if cursor: + query = query.filter(AgentsTags.tag > cursor) + + query = query.order_by(AgentsTags.tag).limit(limit) + results = [tag[0] for tag in query.all()] + return results diff --git a/letta/services/job_manager.py b/letta/services/job_manager.py index 3b98d46391..b8ea803bc7 100644 --- a/letta/services/job_manager.py +++ b/letta/services/job_manager.py @@ -1,9 +1,23 @@ -from typing import List, Optional +from typing import List, Literal, Optional, Union +from sqlalchemy import select +from sqlalchemy.orm import Session + +from letta.orm.enums import JobType +from letta.orm.errors import NoResultFound from letta.orm.job import Job as JobModel -from letta.schemas.enums import JobStatus +from letta.orm.job_messages import JobMessage +from letta.orm.job_usage_statistics import JobUsageStatistics +from letta.orm.message import Message as MessageModel +from letta.orm.sqlalchemy_base import AccessType +from letta.schemas.enums import JobStatus, MessageRole from letta.schemas.job import Job as PydanticJob from letta.schemas.job import JobUpdate +from letta.schemas.letta_message import LettaMessage +from 
letta.schemas.letta_request import LettaRequestConfig +from letta.schemas.message import Message as PydanticMessage +from letta.schemas.run import Run as PydanticRun +from letta.schemas.usage import LettaUsageStatistics from letta.schemas.user import User as PydanticUser from letta.utils import enforce_types, get_utc_time @@ -18,7 +32,7 @@ def __init__(self): self.session_maker = db_context @enforce_types - def create_job(self, pydantic_job: PydanticJob, actor: PydanticUser) -> PydanticJob: + def create_job(self, pydantic_job: Union[PydanticJob, PydanticRun], actor: PydanticUser) -> Union[PydanticJob, PydanticRun]: """Create a new job based on the JobCreate schema.""" with self.session_maker() as session: # Associate the job with the user @@ -33,7 +47,7 @@ def update_job_by_id(self, job_id: str, job_update: JobUpdate, actor: PydanticUs """Update a job by its ID with the given JobUpdate object.""" with self.session_maker() as session: # Fetch the job by ID - job = JobModel.read(db_session=session, identifier=job_id) # TODO: Add this later , actor=actor) + job = self._verify_job_access(session=session, job_id=job_id, actor=actor, access=["write"]) # Update job attributes with only the fields that were explicitly set update_data = job_update.model_dump(exclude_unset=True, exclude_none=True) @@ -53,16 +67,21 @@ def get_job_by_id(self, job_id: str, actor: PydanticUser) -> PydanticJob: """Fetch a job by its ID.""" with self.session_maker() as session: # Retrieve job by ID using the Job model's read method - job = JobModel.read(db_session=session, identifier=job_id) # TODO: Add this later , actor=actor) + job = JobModel.read(db_session=session, identifier=job_id, actor=actor, access_type=AccessType.USER) return job.to_pydantic() @enforce_types def list_jobs( - self, actor: PydanticUser, cursor: Optional[str] = None, limit: Optional[int] = 50, statuses: Optional[List[JobStatus]] = None + self, + actor: PydanticUser, + cursor: Optional[str] = None, + limit: Optional[int] = 
50, + statuses: Optional[List[JobStatus]] = None, + job_type: JobType = JobType.JOB, ) -> List[PydanticJob]: """List all jobs with optional pagination and status filter.""" with self.session_maker() as session: - filter_kwargs = {"user_id": actor.id} + filter_kwargs = {"user_id": actor.id, "job_type": job_type} # Add status filter if provided if statuses: @@ -80,6 +99,252 @@ def list_jobs( def delete_job_by_id(self, job_id: str, actor: PydanticUser) -> PydanticJob: """Delete a job by its ID.""" with self.session_maker() as session: - job = JobModel.read(db_session=session, identifier=job_id) # TODO: Add this later , actor=actor) - job.hard_delete(db_session=session) # TODO: Add this later , actor=actor) + job = self._verify_job_access(session=session, job_id=job_id, actor=actor) + job.hard_delete(db_session=session, actor=actor) return job.to_pydantic() + + @enforce_types + def get_job_messages( + self, + job_id: str, + actor: PydanticUser, + cursor: Optional[str] = None, + limit: Optional[int] = 100, + role: Optional[MessageRole] = None, + ascending: bool = True, + ) -> List[PydanticMessage]: + """ + Get all messages associated with a job. 
+ + Args: + job_id: The ID of the job to get messages for + actor: The user making the request + cursor: Cursor for pagination + limit: Maximum number of messages to return + role: Optional filter for message role + ascending: Optional flag to sort in ascending order + + Returns: + List of messages associated with the job + + Raises: + NoResultFound: If the job does not exist or user does not have access + """ + with self.session_maker() as session: + # Build filters + filters = {} + if role is not None: + filters["role"] = role + + # Get messages + messages = MessageModel.list( + db_session=session, + cursor=cursor, + ascending=ascending, + limit=limit, + actor=actor, + join_model=JobMessage, + join_conditions=[MessageModel.id == JobMessage.message_id, JobMessage.job_id == job_id], + **filters, + ) + + return [message.to_pydantic() for message in messages] + + @enforce_types + def add_message_to_job(self, job_id: str, message_id: str, actor: PydanticUser) -> None: + """ + Associate a message with a job by creating a JobMessage record. + Each message can only be associated with one job. + + Args: + job_id: The ID of the job + message_id: The ID of the message to associate + actor: The user making the request + + Raises: + NoResultFound: If the job does not exist or user does not have access + """ + with self.session_maker() as session: + # First verify job exists and user has access + self._verify_job_access(session, job_id, actor, access=["write"]) + + # Create new JobMessage association + job_message = JobMessage(job_id=job_id, message_id=message_id) + session.add(job_message) + session.commit() + + @enforce_types + def get_job_usage(self, job_id: str, actor: PydanticUser) -> LettaUsageStatistics: + """ + Get usage statistics for a job. 
+ + Args: + job_id: The ID of the job + actor: The user making the request + + Returns: + Usage statistics for the job + + Raises: + NoResultFound: If the job does not exist or user does not have access + """ + with self.session_maker() as session: + # First verify job exists and user has access + self._verify_job_access(session, job_id, actor) + + # Get the latest usage statistics for the job + latest_stats = ( + session.query(JobUsageStatistics) + .filter(JobUsageStatistics.job_id == job_id) + .order_by(JobUsageStatistics.created_at.desc()) + .first() + ) + + if not latest_stats: + return LettaUsageStatistics( + completion_tokens=0, + prompt_tokens=0, + total_tokens=0, + step_count=0, + ) + + return LettaUsageStatistics( + completion_tokens=latest_stats.completion_tokens, + prompt_tokens=latest_stats.prompt_tokens, + total_tokens=latest_stats.total_tokens, + step_count=latest_stats.step_count, + ) + + @enforce_types + def add_job_usage( + self, + job_id: str, + usage: LettaUsageStatistics, + step_id: Optional[str] = None, + actor: PydanticUser = None, + ) -> None: + """ + Add usage statistics for a job. 
+ + Args: + job_id: The ID of the job + usage: Usage statistics for the job + step_id: Optional ID of the specific step within the job + actor: The user making the request + + Raises: + NoResultFound: If the job does not exist or user does not have access + """ + with self.session_maker() as session: + # First verify job exists and user has access + self._verify_job_access(session, job_id, actor, access=["write"]) + + # Create new usage statistics entry + usage_stats = JobUsageStatistics( + job_id=job_id, + completion_tokens=usage.completion_tokens, + prompt_tokens=usage.prompt_tokens, + total_tokens=usage.total_tokens, + step_count=usage.step_count, + step_id=step_id, + ) + if actor: + usage_stats._set_created_and_updated_by_fields(actor.id) + + session.add(usage_stats) + session.commit() + + @enforce_types + def get_run_messages_cursor( + self, + run_id: str, + actor: PydanticUser, + cursor: Optional[str] = None, + limit: Optional[int] = 100, + role: Optional[MessageRole] = None, + ascending: bool = True, + ) -> List[LettaMessage]: + """ + Get messages associated with a job using cursor-based pagination. + This is a wrapper around get_job_messages that provides cursor-based pagination. 
+ + Args: + job_id: The ID of the job to get messages for + actor: The user making the request + cursor: Message ID to get messages after or before + limit: Maximum number of messages to return + ascending: Whether to return messages in ascending order + role: Optional role filter + + Returns: + List of LettaMessages associated with the job + + Raises: + NoResultFound: If the job does not exist or user does not have access + """ + messages = self.get_job_messages( + job_id=run_id, + actor=actor, + cursor=cursor, + limit=limit, + role=role, + ascending=ascending, + ) + + request_config = self._get_run_request_config(run_id) + + # Convert messages to LettaMessages + messages = [ + msg + for m in messages + for msg in m.to_letta_message( + assistant_message=request_config["use_assistant_message"], + assistant_message_tool_name=request_config["assistant_message_tool_name"], + assistant_message_tool_kwarg=request_config["assistant_message_tool_kwarg"], + ) + ] + + return messages + + def _verify_job_access( + self, + session: Session, + job_id: str, + actor: PydanticUser, + access: List[Literal["read", "write", "delete"]] = ["read"], + ) -> JobModel: + """ + Verify that a job exists and the user has the required access. + + Args: + session: The database session + job_id: The ID of the job to verify + actor: The user making the request + + Returns: + The job if it exists and the user has access + + Raises: + NoResultFound: If the job does not exist or user does not have access + """ + job_query = select(JobModel).where(JobModel.id == job_id) + job_query = JobModel.apply_access_predicate(job_query, actor, access, AccessType.USER) + job = session.execute(job_query).scalar_one_or_none() + if not job: + raise NoResultFound(f"Job with id {job_id} does not exist or user does not have access") + return job + + def _get_run_request_config(self, run_id: str) -> LettaRequestConfig: + """ + Get the request config for a job. 
+ + Args: + job_id: The ID of the job to get messages for + + Returns: + The request config for the job + """ + with self.session_maker() as session: + job = session.query(JobModel).filter(JobModel.id == run_id).first() + request_config = job.request_config or LettaRequestConfig() + return request_config diff --git a/letta/services/tool_execution_sandbox.py b/letta/services/tool_execution_sandbox.py index 3e68ad291d..1d7b0d7327 100644 --- a/letta/services/tool_execution_sandbox.py +++ b/letta/services/tool_execution_sandbox.py @@ -38,7 +38,7 @@ class ToolExecutionSandbox: # We make this a long random string to avoid collisions with any variables in the user's code LOCAL_SANDBOX_RESULT_VAR_NAME = "result_ZQqiequkcFwRwwGQMqkt" - def __init__(self, tool_name: str, args: dict, user: User, force_recreate=False, tool_object: Optional[Tool] = None): + def __init__(self, tool_name: str, args: dict, user: User, force_recreate=True, tool_object: Optional[Tool] = None): self.tool_name = tool_name self.args = args self.user = user diff --git a/letta/services/tool_manager.py b/letta/services/tool_manager.py index 1992f21348..d219232912 100644 --- a/letta/services/tool_manager.py +++ b/letta/services/tool_manager.py @@ -2,7 +2,7 @@ import warnings from typing import List, Optional -from letta.constants import BASE_MEMORY_TOOLS, BASE_TOOLS +from letta.constants import BASE_MEMORY_TOOLS, BASE_TOOLS, MULTI_AGENT_TOOLS from letta.functions.functions import derive_openai_json_schema, load_function_set from letta.orm.enums import ToolType @@ -39,7 +39,7 @@ def create_or_update_tool(self, pydantic_tool: PydanticTool, actor: PydanticUser tool = self.get_tool_by_name(tool_name=pydantic_tool.name, actor=actor) if tool: # Put to dict and remove fields that should not be reset - update_data = pydantic_tool.model_dump(exclude={"module"}, exclude_unset=True, exclude_none=True) + update_data = pydantic_tool.model_dump(exclude_unset=True, exclude_none=True) # If there's anything to update if 
update_data: @@ -133,39 +133,42 @@ def delete_tool_by_id(self, tool_id: str, actor: PydanticUser) -> None: @enforce_types def upsert_base_tools(self, actor: PydanticUser) -> List[PydanticTool]: - """Add default tools in base.py""" - module_name = "base" - full_module_name = f"letta.functions.function_sets.{module_name}" - try: - module = importlib.import_module(full_module_name) - except Exception as e: - # Handle other general exceptions - raise e + """Add default tools in base.py and multi_agent.py""" + functions_to_schema = {} + module_names = ["base", "multi_agent"] - functions_to_schema = [] - try: - # Load the function set - functions_to_schema = load_function_set(module) - except ValueError as e: - err = f"Error loading function set '{module_name}': {e}" - warnings.warn(err) + for module_name in module_names: + full_module_name = f"letta.functions.function_sets.{module_name}" + try: + module = importlib.import_module(full_module_name) + except Exception as e: + # Handle other general exceptions + raise e + + try: + # Load the function set + functions_to_schema.update(load_function_set(module)) + except ValueError as e: + err = f"Error loading function set '{module_name}': {e}" + warnings.warn(err) # create tool in db tools = [] for name, schema in functions_to_schema.items(): - if name in BASE_TOOLS + BASE_MEMORY_TOOLS: - tags = [module_name] - if module_name == "base": - tags.append("letta-base") - - # BASE_MEMORY_TOOLS should be executed in an e2b sandbox - # so they should NOT be letta_core tools, instead, treated as custom tools + if name in BASE_TOOLS + BASE_MEMORY_TOOLS + MULTI_AGENT_TOOLS: if name in BASE_TOOLS: tool_type = ToolType.LETTA_CORE + tags = [tool_type.value] elif name in BASE_MEMORY_TOOLS: tool_type = ToolType.LETTA_MEMORY_CORE + tags = [tool_type.value] + elif name in MULTI_AGENT_TOOLS: + tool_type = ToolType.LETTA_MULTI_AGENT_CORE + tags = [tool_type.value] else: - raise ValueError(f"Tool name {name} is not in the list of base tool 
names: {BASE_TOOLS + BASE_MEMORY_TOOLS}") + raise ValueError( + f"Tool name {name} is not in the list of base tool names: {BASE_TOOLS + BASE_MEMORY_TOOLS + MULTI_AGENT_TOOLS}" + ) # create to tool tools.append( @@ -180,4 +183,6 @@ def upsert_base_tools(self, actor: PydanticUser) -> List[PydanticTool]: ) ) + # TODO: Delete any base tools that are stale + return tools diff --git a/letta/utils.py b/letta/utils.py index 5d2eb51393..18a5093ad5 100644 --- a/letta/utils.py +++ b/letta/utils.py @@ -534,12 +534,11 @@ def matches_type(value, hint): origin = get_origin(hint) args = get_args(hint) - if origin is list and isinstance(value, list): # Handle List[T] + if origin is Union: # Handle Union types (including Optional) + return any(matches_type(value, arg) for arg in args) + elif origin is list and isinstance(value, list): # Handle List[T] element_type = args[0] if args else None return all(isinstance(v, element_type) for v in value) if element_type else True - elif origin is Union and type(None) in args: # Handle Optional[T] - non_none_type = next(arg for arg in args if arg is not type(None)) - return value is None or matches_type(value, non_none_type) elif origin: # Handle other generics like Dict, Tuple, etc. return isinstance(value, origin) else: # Handle non-generic types diff --git a/poetry.lock b/poetry.lock index af3ce49641..9576f29afc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -713,13 +713,13 @@ test = ["pytest"] [[package]] name = "composio-core" -version = "0.6.11.post1" +version = "0.6.15" description = "Core package to act as a bridge between composio platform and other services." 
optional = false python-versions = "<4,>=3.9" files = [ - {file = "composio_core-0.6.11.post1-py3-none-any.whl", hash = "sha256:7cdd1c71ef845d9dc37de46bfe28e6f5f051e4efd96abe60fc31a651c46e6702"}, - {file = "composio_core-0.6.11.post1.tar.gz", hash = "sha256:93db130dc8f88aa4f426abe46cef640f96f86efa3f18113c34ad230202dee52c"}, + {file = "composio_core-0.6.15-py3-none-any.whl", hash = "sha256:ffb217409ca6a0743be29c8993ee15c23e6d29db628653054459b733fcc5f3d9"}, + {file = "composio_core-0.6.15.tar.gz", hash = "sha256:cd39b9890ad9582a23fe14a37cea732bbd6c2e99821a142f319f10dcf4d1acc0"}, ] [package.dependencies] @@ -749,13 +749,13 @@ tools = ["diskcache", "flake8", "networkx", "pathspec", "pygments", "ruff", "tra [[package]] name = "composio-langchain" -version = "0.6.11.post1" +version = "0.6.15" description = "Use Composio to get an array of tools with your LangChain agent." optional = false python-versions = "<4,>=3.9" files = [ - {file = "composio_langchain-0.6.11.post1-py3-none-any.whl", hash = "sha256:bd4dd562435aff9626f45e0396a2e5c9d49fd3d68a6caf42994730b0a5cd54ea"}, - {file = "composio_langchain-0.6.11.post1.tar.gz", hash = "sha256:1ff59cc2724e900f8b386e10ac9577adf3c95046f0052356d8efda36a456fc41"}, + {file = "composio_langchain-0.6.15-py3-none-any.whl", hash = "sha256:e79c8a521813e5b177a1d51bc4ddf98975dfe215243637317408a2c2c3455ea3"}, + {file = "composio_langchain-0.6.15.tar.gz", hash = "sha256:796008d94421a069423d1a449e62fcb0877473e18510f156b06a418559c0260e"}, ] [package.dependencies] @@ -892,37 +892,37 @@ vision = ["Pillow (>=9.4.0)"] [[package]] name = "debugpy" -version = "1.8.11" +version = "1.8.12" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.8" files = [ - {file = "debugpy-1.8.11-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:2b26fefc4e31ff85593d68b9022e35e8925714a10ab4858fb1b577a8a48cb8cd"}, - {file = 
"debugpy-1.8.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61bc8b3b265e6949855300e84dc93d02d7a3a637f2aec6d382afd4ceb9120c9f"}, - {file = "debugpy-1.8.11-cp310-cp310-win32.whl", hash = "sha256:c928bbf47f65288574b78518449edaa46c82572d340e2750889bbf8cd92f3737"}, - {file = "debugpy-1.8.11-cp310-cp310-win_amd64.whl", hash = "sha256:8da1db4ca4f22583e834dcabdc7832e56fe16275253ee53ba66627b86e304da1"}, - {file = "debugpy-1.8.11-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:85de8474ad53ad546ff1c7c7c89230db215b9b8a02754d41cb5a76f70d0be296"}, - {file = "debugpy-1.8.11-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ffc382e4afa4aee367bf413f55ed17bd91b191dcaf979890af239dda435f2a1"}, - {file = "debugpy-1.8.11-cp311-cp311-win32.whl", hash = "sha256:40499a9979c55f72f4eb2fc38695419546b62594f8af194b879d2a18439c97a9"}, - {file = "debugpy-1.8.11-cp311-cp311-win_amd64.whl", hash = "sha256:987bce16e86efa86f747d5151c54e91b3c1e36acc03ce1ddb50f9d09d16ded0e"}, - {file = "debugpy-1.8.11-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:84e511a7545d11683d32cdb8f809ef63fc17ea2a00455cc62d0a4dbb4ed1c308"}, - {file = "debugpy-1.8.11-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce291a5aca4985d82875d6779f61375e959208cdf09fcec40001e65fb0a54768"}, - {file = "debugpy-1.8.11-cp312-cp312-win32.whl", hash = "sha256:28e45b3f827d3bf2592f3cf7ae63282e859f3259db44ed2b129093ca0ac7940b"}, - {file = "debugpy-1.8.11-cp312-cp312-win_amd64.whl", hash = "sha256:44b1b8e6253bceada11f714acf4309ffb98bfa9ac55e4fce14f9e5d4484287a1"}, - {file = "debugpy-1.8.11-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:8988f7163e4381b0da7696f37eec7aca19deb02e500245df68a7159739bbd0d3"}, - {file = 
"debugpy-1.8.11-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c1f6a173d1140e557347419767d2b14ac1c9cd847e0b4c5444c7f3144697e4e"}, - {file = "debugpy-1.8.11-cp313-cp313-win32.whl", hash = "sha256:bb3b15e25891f38da3ca0740271e63ab9db61f41d4d8541745cfc1824252cb28"}, - {file = "debugpy-1.8.11-cp313-cp313-win_amd64.whl", hash = "sha256:d8768edcbeb34da9e11bcb8b5c2e0958d25218df7a6e56adf415ef262cd7b6d1"}, - {file = "debugpy-1.8.11-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:ad7efe588c8f5cf940f40c3de0cd683cc5b76819446abaa50dc0829a30c094db"}, - {file = "debugpy-1.8.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:189058d03a40103a57144752652b3ab08ff02b7595d0ce1f651b9acc3a3a35a0"}, - {file = "debugpy-1.8.11-cp38-cp38-win32.whl", hash = "sha256:32db46ba45849daed7ccf3f2e26f7a386867b077f39b2a974bb5c4c2c3b0a280"}, - {file = "debugpy-1.8.11-cp38-cp38-win_amd64.whl", hash = "sha256:116bf8342062246ca749013df4f6ea106f23bc159305843491f64672a55af2e5"}, - {file = "debugpy-1.8.11-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:654130ca6ad5de73d978057eaf9e582244ff72d4574b3e106fb8d3d2a0d32458"}, - {file = "debugpy-1.8.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23dc34c5e03b0212fa3c49a874df2b8b1b8fda95160bd79c01eb3ab51ea8d851"}, - {file = "debugpy-1.8.11-cp39-cp39-win32.whl", hash = "sha256:52d8a3166c9f2815bfae05f386114b0b2d274456980d41f320299a8d9a5615a7"}, - {file = "debugpy-1.8.11-cp39-cp39-win_amd64.whl", hash = "sha256:52c3cf9ecda273a19cc092961ee34eb9ba8687d67ba34cc7b79a521c1c64c4c0"}, - {file = "debugpy-1.8.11-py2.py3-none-any.whl", hash = "sha256:0e22f846f4211383e6a416d04b4c13ed174d24cc5d43f5fd52e7821d0ebc8920"}, - {file = "debugpy-1.8.11.tar.gz", hash = "sha256:6ad2688b69235c43b020e04fecccdf6a96c8943ca9c2fb340b8adc103c655e57"}, + {file = 
"debugpy-1.8.12-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:a2ba7ffe58efeae5b8fad1165357edfe01464f9aef25e814e891ec690e7dd82a"}, + {file = "debugpy-1.8.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbbd4149c4fc5e7d508ece083e78c17442ee13b0e69bfa6bd63003e486770f45"}, + {file = "debugpy-1.8.12-cp310-cp310-win32.whl", hash = "sha256:b202f591204023b3ce62ff9a47baa555dc00bb092219abf5caf0e3718ac20e7c"}, + {file = "debugpy-1.8.12-cp310-cp310-win_amd64.whl", hash = "sha256:9649eced17a98ce816756ce50433b2dd85dfa7bc92ceb60579d68c053f98dff9"}, + {file = "debugpy-1.8.12-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:36f4829839ef0afdfdd208bb54f4c3d0eea86106d719811681a8627ae2e53dd5"}, + {file = "debugpy-1.8.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a28ed481d530e3138553be60991d2d61103ce6da254e51547b79549675f539b7"}, + {file = "debugpy-1.8.12-cp311-cp311-win32.whl", hash = "sha256:4ad9a94d8f5c9b954e0e3b137cc64ef3f579d0df3c3698fe9c3734ee397e4abb"}, + {file = "debugpy-1.8.12-cp311-cp311-win_amd64.whl", hash = "sha256:4703575b78dd697b294f8c65588dc86874ed787b7348c65da70cfc885efdf1e1"}, + {file = "debugpy-1.8.12-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:7e94b643b19e8feb5215fa508aee531387494bf668b2eca27fa769ea11d9f498"}, + {file = "debugpy-1.8.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:086b32e233e89a2740c1615c2f775c34ae951508b28b308681dbbb87bba97d06"}, + {file = "debugpy-1.8.12-cp312-cp312-win32.whl", hash = "sha256:2ae5df899732a6051b49ea2632a9ea67f929604fd2b036613a9f12bc3163b92d"}, + {file = "debugpy-1.8.12-cp312-cp312-win_amd64.whl", hash = "sha256:39dfbb6fa09f12fae32639e3286112fc35ae976114f1f3d37375f3130a820969"}, + {file = "debugpy-1.8.12-cp313-cp313-macosx_14_0_universal2.whl", hash = 
"sha256:696d8ae4dff4cbd06bf6b10d671e088b66669f110c7c4e18a44c43cf75ce966f"}, + {file = "debugpy-1.8.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:898fba72b81a654e74412a67c7e0a81e89723cfe2a3ea6fcd3feaa3395138ca9"}, + {file = "debugpy-1.8.12-cp313-cp313-win32.whl", hash = "sha256:22a11c493c70413a01ed03f01c3c3a2fc4478fc6ee186e340487b2edcd6f4180"}, + {file = "debugpy-1.8.12-cp313-cp313-win_amd64.whl", hash = "sha256:fdb3c6d342825ea10b90e43d7f20f01535a72b3a1997850c0c3cefa5c27a4a2c"}, + {file = "debugpy-1.8.12-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:b0232cd42506d0c94f9328aaf0d1d0785f90f87ae72d9759df7e5051be039738"}, + {file = "debugpy-1.8.12-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9af40506a59450f1315168d47a970db1a65aaab5df3833ac389d2899a5d63b3f"}, + {file = "debugpy-1.8.12-cp38-cp38-win32.whl", hash = "sha256:5cc45235fefac57f52680902b7d197fb2f3650112379a6fa9aa1b1c1d3ed3f02"}, + {file = "debugpy-1.8.12-cp38-cp38-win_amd64.whl", hash = "sha256:557cc55b51ab2f3371e238804ffc8510b6ef087673303890f57a24195d096e61"}, + {file = "debugpy-1.8.12-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:b5c6c967d02fee30e157ab5227706f965d5c37679c687b1e7bbc5d9e7128bd41"}, + {file = "debugpy-1.8.12-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a77f422f31f170c4b7e9ca58eae2a6c8e04da54121900651dfa8e66c29901a"}, + {file = "debugpy-1.8.12-cp39-cp39-win32.whl", hash = "sha256:a4042edef80364239f5b7b5764e55fd3ffd40c32cf6753da9bda4ff0ac466018"}, + {file = "debugpy-1.8.12-cp39-cp39-win_amd64.whl", hash = "sha256:f30b03b0f27608a0b26c75f0bb8a880c752c0e0b01090551b9d87c7d783e2069"}, + {file = "debugpy-1.8.12-py2.py3-none-any.whl", hash = "sha256:274b6a2040349b5c9864e475284bce5bb062e63dce368a394b8cc865ae3b00c6"}, + {file = "debugpy-1.8.12.tar.gz", hash = 
"sha256:646530b04f45c830ceae8e491ca1c9320a2d2f0efea3141487c82130aba70dce"}, ] [[package]] @@ -2399,33 +2399,33 @@ typing-extensions = ">=4.7" [[package]] name = "langchain-openai" -version = "0.2.14" +version = "0.3.0" description = "An integration package connecting OpenAI and LangChain" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "langchain_openai-0.2.14-py3-none-any.whl", hash = "sha256:d232496662f79ece9a11caf7d798ba863e559c771bc366814f7688e0fe664fe8"}, - {file = "langchain_openai-0.2.14.tar.gz", hash = "sha256:7a514f309e356b182a337c0ed36ab3fbe34d9834a235a3b85cb7f91ae775d978"}, + {file = "langchain_openai-0.3.0-py3-none-any.whl", hash = "sha256:49c921a22d272b04749a61e78bffa83aecdb8840b24b69f2909e115a357a9a5b"}, + {file = "langchain_openai-0.3.0.tar.gz", hash = "sha256:88d623eeb2aaa1fff65c2b419a4a1cfd37d3a1d504e598b87cf0bc822a3b70d0"}, ] [package.dependencies] -langchain-core = ">=0.3.27,<0.4.0" +langchain-core = ">=0.3.29,<0.4.0" openai = ">=1.58.1,<2.0.0" tiktoken = ">=0.7,<1" [[package]] name = "langchain-text-splitters" -version = "0.3.4" +version = "0.3.5" description = "LangChain text splitting utilities" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "langchain_text_splitters-0.3.4-py3-none-any.whl", hash = "sha256:432fdb39c161d4d0db16d61d38af068dc5dd4dd08082febd2fced81304b2725c"}, - {file = "langchain_text_splitters-0.3.4.tar.gz", hash = "sha256:f3cedea469684483b4492d9f11dc2fa66388dab01c5d5c5307925515ab884c24"}, + {file = "langchain_text_splitters-0.3.5-py3-none-any.whl", hash = "sha256:8c9b059827438c5fa8f327b4df857e307828a5ec815163c9b5c9569a3e82c8ee"}, + {file = "langchain_text_splitters-0.3.5.tar.gz", hash = "sha256:11cb7ca3694e5bdd342bc16d3875b7f7381651d4a53cbb91d34f22412ae16443"}, ] [package.dependencies] -langchain-core = ">=0.3.26,<0.4.0" +langchain-core = ">=0.3.29,<0.4.0" [[package]] name = "langchainhub" @@ -2468,15 +2468,33 @@ requests-toolbelt = ">=1.0.0,<2.0.0" compression = ["zstandard 
(>=0.23.0,<0.24.0)"] langsmith-pyo3 = ["langsmith-pyo3 (>=0.1.0rc2,<0.2.0)"] +[[package]] +name = "letta-client" +version = "0.1.15" +description = "" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "letta_client-0.1.15-py3-none-any.whl", hash = "sha256:31b4134769f3241736389eac70c3f8f204044ac6346cbd490ef536f003ab2386"}, + {file = "letta_client-0.1.15.tar.gz", hash = "sha256:42cf84a0a7f344f1e7d0c809aeea2d1c6e73eccd8c80655b762f696b41f4c8e9"}, +] + +[package.dependencies] +httpx = ">=0.21.2" +httpx-sse = "0.4.0" +pydantic = ">=1.9.2" +pydantic-core = ">=2.18.2,<3.0.0" +typing_extensions = ">=4.0.0" + [[package]] name = "llama-cloud" -version = "0.1.7" +version = "0.1.9" description = "" optional = false python-versions = "<4,>=3.8" files = [ - {file = "llama_cloud-0.1.7-py3-none-any.whl", hash = "sha256:266db22939c537a2b802eea6a9af2701beff98d5ba46513248011a4f1c17afc6"}, - {file = "llama_cloud-0.1.7.tar.gz", hash = "sha256:7c1767cb209905400e894566661a91230bcff83cd4d9c08e782fd2143ca6a646"}, + {file = "llama_cloud-0.1.9-py3-none-any.whl", hash = "sha256:792ee316985bbf4dd0294007105a100489d4baba0bcc4f3e16284f0c01d832d4"}, + {file = "llama_cloud-0.1.9.tar.gz", hash = "sha256:fc03bd338a1da04b7607a44d82a62b3eb178d80af05a53653e801d6f8bb67df7"}, ] [package.dependencies] @@ -2486,19 +2504,19 @@ pydantic = ">=1.10" [[package]] name = "llama-index" -version = "0.12.9" +version = "0.12.11" description = "Interface between LLMs and your data" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "llama_index-0.12.9-py3-none-any.whl", hash = "sha256:95c39d8055c7d19bd5f099560b53c0971ae9997ebe46f7438766189ed48e4456"}, - {file = "llama_index-0.12.9.tar.gz", hash = "sha256:2f8d671e6ca7e5b33b0f5cbddef8c0a11eb1e39781f1be65e9bd0c4a7a0deb5b"}, + {file = "llama_index-0.12.11-py3-none-any.whl", hash = "sha256:007361c35e1981a1656cef287b7bcdf22aa88e7d41b8e3a8ee261bb5a10519a9"}, + {file = "llama_index-0.12.11.tar.gz", hash = 
"sha256:b1116946a2414aec104a6c417b847da5b4f077a0966c50ebd2fc445cd713adce"}, ] [package.dependencies] llama-index-agent-openai = ">=0.4.0,<0.5.0" llama-index-cli = ">=0.4.0,<0.5.0" -llama-index-core = ">=0.12.9,<0.13.0" +llama-index-core = ">=0.12.11,<0.13.0" llama-index-embeddings-openai = ">=0.3.0,<0.4.0" llama-index-indices-managed-llama-cloud = ">=0.4.0" llama-index-llms-openai = ">=0.3.0,<0.4.0" @@ -2511,17 +2529,17 @@ nltk = ">3.8.1" [[package]] name = "llama-index-agent-openai" -version = "0.4.1" +version = "0.4.2" description = "llama-index agent openai integration" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "llama_index_agent_openai-0.4.1-py3-none-any.whl", hash = "sha256:162507543082f739a8c806911344c8d7f2434d0ee91124cfdd7b0ba5f76d0e57"}, - {file = "llama_index_agent_openai-0.4.1.tar.gz", hash = "sha256:3a89137b228a6e9c2b3f46e367a27b75fb31b458e21777bba819de654707d59e"}, + {file = "llama_index_agent_openai-0.4.2-py3-none-any.whl", hash = "sha256:e100b8a743b11fef373b5be31be590b929950a4d7fd9d158b5f014dd8fd7976e"}, + {file = "llama_index_agent_openai-0.4.2.tar.gz", hash = "sha256:0f8aeb091fc834b2667a46ad2417fc8601bf1c08ccfd1a3d15ede90a30eb1a29"}, ] [package.dependencies] -llama-index-core = ">=0.12.0,<0.13.0" +llama-index-core = ">=0.12.11,<0.13.0" llama-index-llms-openai = ">=0.3.0,<0.4.0" openai = ">=1.14.0" @@ -2543,13 +2561,13 @@ llama-index-llms-openai = ">=0.3.0,<0.4.0" [[package]] name = "llama-index-core" -version = "0.12.10.post1" +version = "0.12.11" description = "Interface between LLMs and your data" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "llama_index_core-0.12.10.post1-py3-none-any.whl", hash = "sha256:897e8cd4efeff6842580b043bdf4008ac60f693df1de2bfd975307a4845707c2"}, - {file = "llama_index_core-0.12.10.post1.tar.gz", hash = "sha256:af27bea4d1494ba84983a649976e60e3de677a73946aa45ed12ce27e3a623ddf"}, + {file = "llama_index_core-0.12.11-py3-none-any.whl", hash = 
"sha256:3b1e019c899e9e011dfa01c96b7e3f666e0c161035fbca6cb787b4c61e0c94db"}, + {file = "llama_index_core-0.12.11.tar.gz", hash = "sha256:9a41ca91167ea5eec9ebaac7f5e958b7feddbd8af3bfbf7c393a5edfb994d566"}, ] [package.dependencies] @@ -2608,13 +2626,13 @@ llama-index-core = ">=0.12.0,<0.13.0" [[package]] name = "llama-index-llms-openai" -version = "0.3.12" +version = "0.3.13" description = "llama-index llms openai integration" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "llama_index_llms_openai-0.3.12-py3-none-any.whl", hash = "sha256:08be76b9e649f6085e93292504074728a6531eb7f8930eaf40a2fce70a9f59df"}, - {file = "llama_index_llms_openai-0.3.12.tar.gz", hash = "sha256:1880273a7e409c05f1dbccdbac5ce3c214771901cd3696aeb556a29dfed8477a"}, + {file = "llama_index_llms_openai-0.3.13-py3-none-any.whl", hash = "sha256:caea1d6cb5bdd34518fcefe28b784698c92120ed133e6cd4591f777cd15180b0"}, + {file = "llama_index_llms_openai-0.3.13.tar.gz", hash = "sha256:51dda240dae7671c37e84bb50fe77fe6bb58a9b2a7e33dccd84473c9998afcea"}, ] [package.dependencies] @@ -2670,13 +2688,13 @@ llama-index-program-openai = ">=0.3.0,<0.4.0" [[package]] name = "llama-index-readers-file" -version = "0.4.2" +version = "0.4.3" description = "llama-index readers file integration" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "llama_index_readers_file-0.4.2-py3-none-any.whl", hash = "sha256:9341ff375aae3ab58256af4fc7c6619e08b04a1e78bc5c9d3d1763df3b9223a6"}, - {file = "llama_index_readers_file-0.4.2.tar.gz", hash = "sha256:d677a2eef0695d00b487ac4ea14c82e6a4eaade3a09c540f8f81626d852e3491"}, + {file = "llama_index_readers_file-0.4.3-py3-none-any.whl", hash = "sha256:c669da967ea534e3af3660f9fd730c71c725288f5c57906bcce338414ebeee5c"}, + {file = "llama_index_readers_file-0.4.3.tar.gz", hash = "sha256:07514bebed7ce431c1b3ef9279d09aa3d1bba8e342d661860a033355b98fb33a"}, ] [package.dependencies] @@ -2869,13 +2887,13 @@ files = [ [[package]] name = "marshmallow" -version = "3.24.0" 
+version = "3.25.1" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." optional = false python-versions = ">=3.9" files = [ - {file = "marshmallow-3.24.0-py3-none-any.whl", hash = "sha256:459922b7a1fd3d29d5082ddcadfcea0efd98985030e71d3ef0dd8f44f406e41d"}, - {file = "marshmallow-3.24.0.tar.gz", hash = "sha256:378572f727e52123d00de1bdd9b7ea7bed18bbfedc7f9bfbcddaf78925a8d602"}, + {file = "marshmallow-3.25.1-py3-none-any.whl", hash = "sha256:ec5d00d873ce473b7f2ffcb7104286a376c354cab0c2fa12f5573dab03e87210"}, + {file = "marshmallow-3.25.1.tar.gz", hash = "sha256:f4debda3bb11153d81ac34b0d582bf23053055ee11e791b54b4b35493468040a"}, ] [package.dependencies] @@ -2883,7 +2901,7 @@ packaging = ">=17.0" [package.extras] dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"] -docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.14)", "sphinx (==8.1.3)", "sphinx-issues (==5.0.0)"] +docs = ["autodocsumm (==0.2.14)", "furo (==2024.8.6)", "sphinx (==8.1.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.0)", "sphinxext-opengraph (==0.9.1)"] tests = ["pytest", "simplejson"] [[package]] @@ -3236,13 +3254,13 @@ files = [ [[package]] name = "openai" -version = "1.59.3" +version = "1.59.7" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" files = [ - {file = "openai-1.59.3-py3-none-any.whl", hash = "sha256:b041887a0d8f3e70d1fc6ffbb2bf7661c3b9a2f3e806c04bf42f572b9ac7bc37"}, - {file = "openai-1.59.3.tar.gz", hash = "sha256:7f7fff9d8729968588edf1524e73266e8593bb6cab09298340efb755755bb66f"}, + {file = "openai-1.59.7-py3-none-any.whl", hash = "sha256:cfa806556226fa96df7380ab2e29814181d56fea44738c2b0e581b462c268692"}, + {file = "openai-1.59.7.tar.gz", hash = "sha256:043603def78c00befb857df9f0a16ee76a3af5984ba40cb7ee5e2f40db4646bf"}, ] [package.dependencies] @@ -3261,86 +3279,86 @@ realtime = ["websockets (>=13,<15)"] [[package]] name = "orjson" -version = "3.10.13" 
+version = "3.10.14" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.13-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1232c5e873a4d1638ef957c5564b4b0d6f2a6ab9e207a9b3de9de05a09d1d920"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d26a0eca3035619fa366cbaf49af704c7cb1d4a0e6c79eced9f6a3f2437964b6"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d4b6acd7c9c829895e50d385a357d4b8c3fafc19c5989da2bae11783b0fd4977"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1884e53c6818686891cc6fc5a3a2540f2f35e8c76eac8dc3b40480fb59660b00"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a428afb5720f12892f64920acd2eeb4d996595bf168a26dd9190115dbf1130d"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba5b13b8739ce5b630c65cb1c85aedbd257bcc2b9c256b06ab2605209af75a2e"}, - {file = "orjson-3.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cab83e67f6aabda1b45882254b2598b48b80ecc112968fc6483fa6dae609e9f0"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:62c3cc00c7e776c71c6b7b9c48c5d2701d4c04e7d1d7cdee3572998ee6dc57cc"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:dc03db4922e75bbc870b03fc49734cefbd50fe975e0878327d200022210b82d8"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:22f1c9a30b43d14a041a6ea190d9eca8a6b80c4beb0e8b67602c82d30d6eec3e"}, - {file = "orjson-3.10.13-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b42f56821c29e697c68d7d421410d7c1d8f064ae288b525af6a50cf99a4b1200"}, - {file = 
"orjson-3.10.13-cp310-cp310-win32.whl", hash = "sha256:0dbf3b97e52e093d7c3e93eb5eb5b31dc7535b33c2ad56872c83f0160f943487"}, - {file = "orjson-3.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:46c249b4e934453be4ff2e518cd1adcd90467da7391c7a79eaf2fbb79c51e8c7"}, - {file = "orjson-3.10.13-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a36c0d48d2f084c800763473020a12976996f1109e2fcb66cfea442fdf88047f"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0065896f85d9497990731dfd4a9991a45b0a524baec42ef0a63c34630ee26fd6"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92b4ec30d6025a9dcdfe0df77063cbce238c08d0404471ed7a79f309364a3d19"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a94542d12271c30044dadad1125ee060e7a2048b6c7034e432e116077e1d13d2"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3723e137772639af8adb68230f2aa4bcb27c48b3335b1b1e2d49328fed5e244c"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f00c7fb18843bad2ac42dc1ce6dd214a083c53f1e324a0fd1c8137c6436269b"}, - {file = "orjson-3.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0e2759d3172300b2f892dee85500b22fca5ac49e0c42cfff101aaf9c12ac9617"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ee948c6c01f6b337589c88f8e0bb11e78d32a15848b8b53d3f3b6fea48842c12"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:aa6fe68f0981fba0d4bf9cdc666d297a7cdba0f1b380dcd075a9a3dd5649a69e"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:dbcd7aad6bcff258f6896abfbc177d54d9b18149c4c561114f47ebfe74ae6bfd"}, - {file = "orjson-3.10.13-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:2149e2fcd084c3fd584881c7f9d7f9e5ad1e2e006609d8b80649655e0d52cd02"}, - {file = "orjson-3.10.13-cp311-cp311-win32.whl", hash = "sha256:89367767ed27b33c25c026696507c76e3d01958406f51d3a2239fe9e91959df2"}, - {file = "orjson-3.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:dca1d20f1af0daff511f6e26a27354a424f0b5cf00e04280279316df0f604a6f"}, - {file = "orjson-3.10.13-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a3614b00621c77f3f6487792238f9ed1dd8a42f2ec0e6540ee34c2d4e6db813a"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c976bad3996aa027cd3aef78aa57873f3c959b6c38719de9724b71bdc7bd14b"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f74d878d1efb97a930b8a9f9898890067707d683eb5c7e20730030ecb3fb930"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33ef84f7e9513fb13b3999c2a64b9ca9c8143f3da9722fbf9c9ce51ce0d8076e"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd2bcde107221bb9c2fa0c4aaba735a537225104173d7e19cf73f70b3126c993"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:064b9dbb0217fd64a8d016a8929f2fae6f3312d55ab3036b00b1d17399ab2f3e"}, - {file = "orjson-3.10.13-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0044b0b8c85a565e7c3ce0a72acc5d35cda60793edf871ed94711e712cb637d"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7184f608ad563032e398f311910bc536e62b9fbdca2041be889afcbc39500de8"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d36f689e7e1b9b6fb39dbdebc16a6f07cbe994d3644fb1c22953020fc575935f"}, - {file = "orjson-3.10.13-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:54433e421618cd5873e51c0e9d0b9fb35f7bf76eb31c8eab20b3595bb713cd3d"}, - {file 
= "orjson-3.10.13-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e1ba0c5857dd743438acecc1cd0e1adf83f0a81fee558e32b2b36f89e40cee8b"}, - {file = "orjson-3.10.13-cp312-cp312-win32.whl", hash = "sha256:a42b9fe4b0114b51eb5cdf9887d8c94447bc59df6dbb9c5884434eab947888d8"}, - {file = "orjson-3.10.13-cp312-cp312-win_amd64.whl", hash = "sha256:3a7df63076435f39ec024bdfeb4c9767ebe7b49abc4949068d61cf4857fa6d6c"}, - {file = "orjson-3.10.13-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2cdaf8b028a976ebab837a2c27b82810f7fc76ed9fb243755ba650cc83d07730"}, - {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48a946796e390cbb803e069472de37f192b7a80f4ac82e16d6eb9909d9e39d56"}, - {file = "orjson-3.10.13-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7d64f1db5ecbc21eb83097e5236d6ab7e86092c1cd4c216c02533332951afc"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:711878da48f89df194edd2ba603ad42e7afed74abcd2bac164685e7ec15f96de"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:cf16f06cb77ce8baf844bc222dbcb03838f61d0abda2c3341400c2b7604e436e"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8257c3fb8dd7b0b446b5e87bf85a28e4071ac50f8c04b6ce2d38cb4abd7dff57"}, - {file = "orjson-3.10.13-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d9c3a87abe6f849a4a7ac8a8a1dede6320a4303d5304006b90da7a3cd2b70d2c"}, - {file = "orjson-3.10.13-cp313-cp313-win32.whl", hash = "sha256:527afb6ddb0fa3fe02f5d9fba4920d9d95da58917826a9be93e0242da8abe94a"}, - {file = "orjson-3.10.13-cp313-cp313-win_amd64.whl", hash = "sha256:b5f7c298d4b935b222f52d6c7f2ba5eafb59d690d9a3840b7b5c5cda97f6ec5c"}, - {file = "orjson-3.10.13-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e49333d1038bc03a25fdfe11c86360df9b890354bfe04215f1f54d030f33c342"}, - {file = 
"orjson-3.10.13-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:003721c72930dbb973f25c5d8e68d0f023d6ed138b14830cc94e57c6805a2eab"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:63664bf12addb318dc8f032160e0f5dc17eb8471c93601e8f5e0d07f95003784"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6066729cf9552d70de297b56556d14b4f49c8f638803ee3c90fd212fa43cc6af"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8a1152e2761025c5d13b5e1908d4b1c57f3797ba662e485ae6f26e4e0c466388"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69b21d91c5c5ef8a201036d207b1adf3aa596b930b6ca3c71484dd11386cf6c3"}, - {file = "orjson-3.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b12a63f48bb53dba8453d36ca2661f2330126d54e26c1661e550b32864b28ce3"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a5a7624ab4d121c7e035708c8dd1f99c15ff155b69a1c0affc4d9d8b551281ba"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:0fee076134398d4e6cb827002468679ad402b22269510cf228301b787fdff5ae"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ae537fcf330b3947e82c6ae4271e092e6cf16b9bc2cef68b14ffd0df1fa8832a"}, - {file = "orjson-3.10.13-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f81b26c03f5fb5f0d0ee48d83cea4d7bc5e67e420d209cc1a990f5d1c62f9be0"}, - {file = "orjson-3.10.13-cp38-cp38-win32.whl", hash = "sha256:0bc858086088b39dc622bc8219e73d3f246fb2bce70a6104abd04b3a080a66a8"}, - {file = "orjson-3.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:3ca6f17467ebbd763f8862f1d89384a5051b461bb0e41074f583a0ebd7120e8e"}, - {file = "orjson-3.10.13-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:4a11532cbfc2f5752c37e84863ef8435b68b0e6d459b329933294f65fa4bda1a"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c96d2fb80467d1d0dfc4d037b4e1c0f84f1fe6229aa7fea3f070083acef7f3d7"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dda4ba4d3e6f6c53b6b9c35266788053b61656a716a7fef5c884629c2a52e7aa"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f998bbf300690be881772ee9c5281eb9c0044e295bcd4722504f5b5c6092ff"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1cc42ed75b585c0c4dc5eb53a90a34ccb493c09a10750d1a1f9b9eff2bd12"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b0f29d485411e3c13d79604b740b14e4e5fb58811743f6f4f9693ee6480a8f"}, - {file = "orjson-3.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:233aae4474078d82f425134bb6a10fb2b3fc5a1a1b3420c6463ddd1b6a97eda8"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e384e330a67cf52b3597ee2646de63407da6f8fc9e9beec3eaaaef5514c7a1c9"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:4222881d0aab76224d7b003a8e5fdae4082e32c86768e0e8652de8afd6c4e2c1"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e400436950ba42110a20c50c80dff4946c8e3ec09abc1c9cf5473467e83fd1c5"}, - {file = "orjson-3.10.13-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f47c9e7d224b86ffb086059cdcf634f4b3f32480f9838864aa09022fe2617ce2"}, - {file = "orjson-3.10.13-cp39-cp39-win32.whl", hash = "sha256:a9ecea472f3eb653e1c0a3d68085f031f18fc501ea392b98dcca3e87c24f9ebe"}, - {file = "orjson-3.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:5385935a73adce85cc7faac9d396683fd813566d3857fa95a0b521ef84a5b588"}, - {file = "orjson-3.10.13.tar.gz", hash = 
"sha256:eb9bfb14ab8f68d9d9492d4817ae497788a15fd7da72e14dfabc289c3bb088ec"}, + {file = "orjson-3.10.14-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:849ea7845a55f09965826e816cdc7689d6cf74fe9223d79d758c714af955bcb6"}, + {file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5947b139dfa33f72eecc63f17e45230a97e741942955a6c9e650069305eb73d"}, + {file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cde6d76910d3179dae70f164466692f4ea36da124d6fb1a61399ca589e81d69a"}, + {file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6dfbaeb7afa77ca608a50e2770a0461177b63a99520d4928e27591b142c74b1"}, + {file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa45e489ef80f28ff0e5ba0a72812b8cfc7c1ef8b46a694723807d1b07c89ebb"}, + {file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f5007abfdbb1d866e2aa8990bd1c465f0f6da71d19e695fc278282be12cffa5"}, + {file = "orjson-3.10.14-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1b49e2af011c84c3f2d541bb5cd1e3c7c2df672223e7e3ea608f09cf295e5f8a"}, + {file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:164ac155109226b3a2606ee6dda899ccfbe6e7e18b5bdc3fbc00f79cc074157d"}, + {file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6b1225024cf0ef5d15934b5ffe9baf860fe8bc68a796513f5ea4f5056de30bca"}, + {file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d6546e8073dc382e60fcae4a001a5a1bc46da5eab4a4878acc2d12072d6166d5"}, + {file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9f1d2942605c894162252d6259b0121bf1cb493071a1ea8cb35d79cb3e6ac5bc"}, + {file = "orjson-3.10.14-cp310-cp310-win32.whl", hash = 
"sha256:397083806abd51cf2b3bbbf6c347575374d160331a2d33c5823e22249ad3118b"}, + {file = "orjson-3.10.14-cp310-cp310-win_amd64.whl", hash = "sha256:fa18f949d3183a8d468367056be989666ac2bef3a72eece0bade9cdb733b3c28"}, + {file = "orjson-3.10.14-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f506fd666dd1ecd15a832bebc66c4df45c1902fd47526292836c339f7ba665a9"}, + {file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efe5fd254cfb0eeee13b8ef7ecb20f5d5a56ddda8a587f3852ab2cedfefdb5f6"}, + {file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ddc8c866d7467f5ee2991397d2ea94bcf60d0048bdd8ca555740b56f9042725"}, + {file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af8e42ae4363773658b8d578d56dedffb4f05ceeb4d1d4dd3fb504950b45526"}, + {file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84dd83110503bc10e94322bf3ffab8bc49150176b49b4984dc1cce4c0a993bf9"}, + {file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36f5bfc0399cd4811bf10ec7a759c7ab0cd18080956af8ee138097d5b5296a95"}, + {file = "orjson-3.10.14-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868943660fb2a1e6b6b965b74430c16a79320b665b28dd4511d15ad5038d37d5"}, + {file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33449c67195969b1a677533dee9d76e006001213a24501333624623e13c7cc8e"}, + {file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:e4c9f60f9fb0b5be66e416dcd8c9d94c3eabff3801d875bdb1f8ffc12cf86905"}, + {file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0de4d6315cfdbd9ec803b945c23b3a68207fd47cbe43626036d97e8e9561a436"}, + {file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:83adda3db595cb1a7e2237029b3249c85afbe5c747d26b41b802e7482cb3933e"}, + {file = "orjson-3.10.14-cp311-cp311-win32.whl", hash = "sha256:998019ef74a4997a9d741b1473533cdb8faa31373afc9849b35129b4b8ec048d"}, + {file = "orjson-3.10.14-cp311-cp311-win_amd64.whl", hash = "sha256:9d034abdd36f0f0f2240f91492684e5043d46f290525d1117712d5b8137784eb"}, + {file = "orjson-3.10.14-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2ad4b7e367efba6dc3f119c9a0fcd41908b7ec0399a696f3cdea7ec477441b09"}, + {file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f496286fc85e93ce0f71cc84fc1c42de2decf1bf494094e188e27a53694777a7"}, + {file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c7f189bbfcded40e41a6969c1068ba305850ba016665be71a217918931416fbf"}, + {file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cc8204f0b75606869c707da331058ddf085de29558b516fc43c73ee5ee2aadb"}, + {file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deaa2899dff7f03ab667e2ec25842d233e2a6a9e333efa484dfe666403f3501c"}, + {file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1c3ea52642c9714dc6e56de8a451a066f6d2707d273e07fe8a9cc1ba073813d"}, + {file = "orjson-3.10.14-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9d3f9ed72e7458ded9a1fb1b4d4ed4c4fdbaf82030ce3f9274b4dc1bff7ace2b"}, + {file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:07520685d408a2aba514c17ccc16199ff2934f9f9e28501e676c557f454a37fe"}, + {file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:76344269b550ea01488d19a2a369ab572c1ac4449a72e9f6ac0d70eb1cbfb953"}, + {file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e2979d0f2959990620f7e62da6cd954e4620ee815539bc57a8ae46e2dacf90e3"}, + {file 
= "orjson-3.10.14-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03f61ca3674555adcb1aa717b9fc87ae936aa7a63f6aba90a474a88701278780"}, + {file = "orjson-3.10.14-cp312-cp312-win32.whl", hash = "sha256:d5075c54edf1d6ad81d4c6523ce54a748ba1208b542e54b97d8a882ecd810fd1"}, + {file = "orjson-3.10.14-cp312-cp312-win_amd64.whl", hash = "sha256:175cafd322e458603e8ce73510a068d16b6e6f389c13f69bf16de0e843d7d406"}, + {file = "orjson-3.10.14-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:0905ca08a10f7e0e0c97d11359609300eb1437490a7f32bbaa349de757e2e0c7"}, + {file = "orjson-3.10.14-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92d13292249f9f2a3e418cbc307a9fbbef043c65f4bd8ba1eb620bc2aaba3d15"}, + {file = "orjson-3.10.14-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90937664e776ad316d64251e2fa2ad69265e4443067668e4727074fe39676414"}, + {file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9ed3d26c4cb4f6babaf791aa46a029265850e80ec2a566581f5c2ee1a14df4f1"}, + {file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:56ee546c2bbe9599aba78169f99d1dc33301853e897dbaf642d654248280dc6e"}, + {file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:901e826cb2f1bdc1fcef3ef59adf0c451e8f7c0b5deb26c1a933fb66fb505eae"}, + {file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:26336c0d4b2d44636e1e1e6ed1002f03c6aae4a8a9329561c8883f135e9ff010"}, + {file = "orjson-3.10.14-cp313-cp313-win32.whl", hash = "sha256:e2bc525e335a8545c4e48f84dd0328bc46158c9aaeb8a1c2276546e94540ea3d"}, + {file = "orjson-3.10.14-cp313-cp313-win_amd64.whl", hash = "sha256:eca04dfd792cedad53dc9a917da1a522486255360cb4e77619343a20d9f35364"}, + {file = "orjson-3.10.14-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9a0fba3b8a587a54c18585f077dcab6dd251c170d85cfa4d063d5746cd595a0f"}, + {file = 
"orjson-3.10.14-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:175abf3d20e737fec47261d278f95031736a49d7832a09ab684026528c4d96db"}, + {file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:29ca1a93e035d570e8b791b6c0feddd403c6a5388bfe870bf2aa6bba1b9d9b8e"}, + {file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f77202c80e8ab5a1d1e9faf642343bee5aaf332061e1ada4e9147dbd9eb00c46"}, + {file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e2ec73b7099b6a29b40a62e08a23b936423bd35529f8f55c42e27acccde7954"}, + {file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2d1679df9f9cd9504f8dff24555c1eaabba8aad7f5914f28dab99e3c2552c9d"}, + {file = "orjson-3.10.14-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:691ab9a13834310a263664313e4f747ceb93662d14a8bdf20eb97d27ed488f16"}, + {file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:b11ed82054fce82fb74cea33247d825d05ad6a4015ecfc02af5fbce442fbf361"}, + {file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:e70a1d62b8288677d48f3bea66c21586a5f999c64ecd3878edb7393e8d1b548d"}, + {file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:16642f10c1ca5611251bd835de9914a4b03095e28a34c8ba6a5500b5074338bd"}, + {file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3871bad546aa66c155e3f36f99c459780c2a392d502a64e23fb96d9abf338511"}, + {file = "orjson-3.10.14-cp38-cp38-win32.whl", hash = "sha256:0293a88815e9bb5c90af4045f81ed364d982f955d12052d989d844d6c4e50945"}, + {file = "orjson-3.10.14-cp38-cp38-win_amd64.whl", hash = "sha256:6169d3868b190d6b21adc8e61f64e3db30f50559dfbdef34a1cd6c738d409dfc"}, + {file = "orjson-3.10.14-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:06d4ec218b1ec1467d8d64da4e123b4794c781b536203c309ca0f52819a16c03"}, + {file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:962c2ec0dcaf22b76dee9831fdf0c4a33d4bf9a257a2bc5d4adc00d5c8ad9034"}, + {file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:21d3be4132f71ef1360385770474f29ea1538a242eef72ac4934fe142800e37f"}, + {file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c28ed60597c149a9e3f5ad6dd9cebaee6fb2f0e3f2d159a4a2b9b862d4748860"}, + {file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e947f70167fe18469f2023644e91ab3d24f9aed69a5e1c78e2c81b9cea553fb"}, + {file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64410696c97a35af2432dea7bdc4ce32416458159430ef1b4beb79fd30093ad6"}, + {file = "orjson-3.10.14-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8050a5d81c022561ee29cd2739de5b4445f3c72f39423fde80a63299c1892c52"}, + {file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b49a28e30d3eca86db3fe6f9b7f4152fcacbb4a467953cd1b42b94b479b77956"}, + {file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:ca041ad20291a65d853a9523744eebc3f5a4b2f7634e99f8fe88320695ddf766"}, + {file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d313a2998b74bb26e9e371851a173a9b9474764916f1fc7971095699b3c6e964"}, + {file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7796692136a67b3e301ef9052bde6fe8e7bd5200da766811a3a608ffa62aaff0"}, + {file = "orjson-3.10.14-cp39-cp39-win32.whl", hash = "sha256:eee4bc767f348fba485ed9dc576ca58b0a9eac237f0e160f7a59bce628ed06b3"}, + {file = "orjson-3.10.14-cp39-cp39-win_amd64.whl", hash = "sha256:96a1c0ee30fb113b3ae3c748fd75ca74a157ff4c58476c47db4d61518962a011"}, + {file = "orjson-3.10.14.tar.gz", hash = 
"sha256:cf31f6f071a6b8e7aa1ead1fa27b935b48d00fbfa6a28ce856cfff2d5dd68eed"}, ] [[package]] @@ -3825,22 +3843,22 @@ files = [ [[package]] name = "protobuf" -version = "5.29.2" +version = "5.29.3" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-5.29.2-cp310-abi3-win32.whl", hash = "sha256:c12ba8249f5624300cf51c3d0bfe5be71a60c63e4dcf51ffe9a68771d958c851"}, - {file = "protobuf-5.29.2-cp310-abi3-win_amd64.whl", hash = "sha256:842de6d9241134a973aab719ab42b008a18a90f9f07f06ba480df268f86432f9"}, - {file = "protobuf-5.29.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a0c53d78383c851bfa97eb42e3703aefdc96d2036a41482ffd55dc5f529466eb"}, - {file = "protobuf-5.29.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:494229ecd8c9009dd71eda5fd57528395d1eacdf307dbece6c12ad0dd09e912e"}, - {file = "protobuf-5.29.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:b6b0d416bbbb9d4fbf9d0561dbfc4e324fd522f61f7af0fe0f282ab67b22477e"}, - {file = "protobuf-5.29.2-cp38-cp38-win32.whl", hash = "sha256:e621a98c0201a7c8afe89d9646859859be97cb22b8bf1d8eacfd90d5bda2eb19"}, - {file = "protobuf-5.29.2-cp38-cp38-win_amd64.whl", hash = "sha256:13d6d617a2a9e0e82a88113d7191a1baa1e42c2cc6f5f1398d3b054c8e7e714a"}, - {file = "protobuf-5.29.2-cp39-cp39-win32.whl", hash = "sha256:36000f97ea1e76e8398a3f02936aac2a5d2b111aae9920ec1b769fc4a222c4d9"}, - {file = "protobuf-5.29.2-cp39-cp39-win_amd64.whl", hash = "sha256:2d2e674c58a06311c8e99e74be43e7f3a8d1e2b2fdf845eaa347fbd866f23355"}, - {file = "protobuf-5.29.2-py3-none-any.whl", hash = "sha256:fde4554c0e578a5a0bcc9a276339594848d1e89f9ea47b4427c80e5d72f90181"}, - {file = "protobuf-5.29.2.tar.gz", hash = "sha256:b2cc8e8bb7c9326996f0e160137b0861f1a82162502658df2951209d0cb0309e"}, + {file = "protobuf-5.29.3-cp310-abi3-win32.whl", hash = "sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888"}, + {file = "protobuf-5.29.3-cp310-abi3-win_amd64.whl", hash = 
"sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a"}, + {file = "protobuf-5.29.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8434404bbf139aa9e1300dbf989667a83d42ddda9153d8ab76e0d5dcaca484e"}, + {file = "protobuf-5.29.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:daaf63f70f25e8689c072cfad4334ca0ac1d1e05a92fc15c54eb9cf23c3efd84"}, + {file = "protobuf-5.29.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:c027e08a08be10b67c06bf2370b99c811c466398c357e615ca88c91c07f0910f"}, + {file = "protobuf-5.29.3-cp38-cp38-win32.whl", hash = "sha256:84a57163a0ccef3f96e4b6a20516cedcf5bb3a95a657131c5c3ac62200d23252"}, + {file = "protobuf-5.29.3-cp38-cp38-win_amd64.whl", hash = "sha256:b89c115d877892a512f79a8114564fb435943b59067615894c3b13cd3e1fa107"}, + {file = "protobuf-5.29.3-cp39-cp39-win32.whl", hash = "sha256:0eb32bfa5219fc8d4111803e9a690658aa2e6366384fd0851064b963b6d1f2a7"}, + {file = "protobuf-5.29.3-cp39-cp39-win_amd64.whl", hash = "sha256:6ce8cc3389a20693bfde6c6562e03474c40851b44975c9b2bf6df7d8c4f864da"}, + {file = "protobuf-5.29.3-py3-none-any.whl", hash = "sha256:0a18ed4a24198528f2333802eb075e59dea9d679ab7a6c5efb017a59004d849f"}, + {file = "protobuf-5.29.3.tar.gz", hash = "sha256:5da0f41edaf117bde316404bad1a486cb4ededf8e4a54891296f648e8e076620"}, ] [[package]] @@ -3994,53 +4012,53 @@ tests = ["pytest"] [[package]] name = "pyarrow" -version = "18.1.0" +version = "19.0.0" description = "Python library for Apache Arrow" optional = true python-versions = ">=3.9" files = [ - {file = "pyarrow-18.1.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e21488d5cfd3d8b500b3238a6c4b075efabc18f0f6d80b29239737ebd69caa6c"}, - {file = "pyarrow-18.1.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:b516dad76f258a702f7ca0250885fc93d1fa5ac13ad51258e39d402bd9e2e1e4"}, - {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f443122c8e31f4c9199cb23dca29ab9427cef990f283f80fe15b8e124bcc49b"}, - 
{file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a03da7f2758645d17b7b4f83c8bffeae5bbb7f974523fe901f36288d2eab71"}, - {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ba17845efe3aa358ec266cf9cc2800fa73038211fb27968bfa88acd09261a470"}, - {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:3c35813c11a059056a22a3bef520461310f2f7eea5c8a11ef9de7062a23f8d56"}, - {file = "pyarrow-18.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9736ba3c85129d72aefa21b4f3bd715bc4190fe4426715abfff90481e7d00812"}, - {file = "pyarrow-18.1.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:eaeabf638408de2772ce3d7793b2668d4bb93807deed1725413b70e3156a7854"}, - {file = "pyarrow-18.1.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:3b2e2239339c538f3464308fd345113f886ad031ef8266c6f004d49769bb074c"}, - {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f39a2e0ed32a0970e4e46c262753417a60c43a3246972cfc2d3eb85aedd01b21"}, - {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e31e9417ba9c42627574bdbfeada7217ad8a4cbbe45b9d6bdd4b62abbca4c6f6"}, - {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:01c034b576ce0eef554f7c3d8c341714954be9b3f5d5bc7117006b85fcf302fe"}, - {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:f266a2c0fc31995a06ebd30bcfdb7f615d7278035ec5b1cd71c48d56daaf30b0"}, - {file = "pyarrow-18.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:d4f13eee18433f99adefaeb7e01d83b59f73360c231d4782d9ddfaf1c3fbde0a"}, - {file = "pyarrow-18.1.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:9f3a76670b263dc41d0ae877f09124ab96ce10e4e48f3e3e4257273cee61ad0d"}, - {file = "pyarrow-18.1.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:da31fbca07c435be88a0c321402c4e31a2ba61593ec7473630769de8346b54ee"}, - {file = 
"pyarrow-18.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:543ad8459bc438efc46d29a759e1079436290bd583141384c6f7a1068ed6f992"}, - {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0743e503c55be0fdb5c08e7d44853da27f19dc854531c0570f9f394ec9671d54"}, - {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:d4b3d2a34780645bed6414e22dda55a92e0fcd1b8a637fba86800ad737057e33"}, - {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c52f81aa6f6575058d8e2c782bf79d4f9fdc89887f16825ec3a66607a5dd8e30"}, - {file = "pyarrow-18.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ad4892617e1a6c7a551cfc827e072a633eaff758fa09f21c4ee548c30bcaf99"}, - {file = "pyarrow-18.1.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:84e314d22231357d473eabec709d0ba285fa706a72377f9cc8e1cb3c8013813b"}, - {file = "pyarrow-18.1.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:f591704ac05dfd0477bb8f8e0bd4b5dc52c1cadf50503858dce3a15db6e46ff2"}, - {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acb7564204d3c40babf93a05624fc6a8ec1ab1def295c363afc40b0c9e66c191"}, - {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74de649d1d2ccb778f7c3afff6085bd5092aed4c23df9feeb45dd6b16f3811aa"}, - {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:f96bd502cb11abb08efea6dab09c003305161cb6c9eafd432e35e76e7fa9b90c"}, - {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:36ac22d7782554754a3b50201b607d553a8d71b78cdf03b33c1125be4b52397c"}, - {file = "pyarrow-18.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:25dbacab8c5952df0ca6ca0af28f50d45bd31c1ff6fcf79e2d120b4a65ee7181"}, - {file = "pyarrow-18.1.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6a276190309aba7bc9d5bd2933230458b3521a4317acfefe69a354f2fe59f2bc"}, - {file = 
"pyarrow-18.1.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:ad514dbfcffe30124ce655d72771ae070f30bf850b48bc4d9d3b25993ee0e386"}, - {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aebc13a11ed3032d8dd6e7171eb6e86d40d67a5639d96c35142bd568b9299324"}, - {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6cf5c05f3cee251d80e98726b5c7cc9f21bab9e9783673bac58e6dfab57ecc8"}, - {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:11b676cd410cf162d3f6a70b43fb9e1e40affbc542a1e9ed3681895f2962d3d9"}, - {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:b76130d835261b38f14fc41fdfb39ad8d672afb84c447126b84d5472244cfaba"}, - {file = "pyarrow-18.1.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:0b331e477e40f07238adc7ba7469c36b908f07c89b95dd4bd3a0ec84a3d1e21e"}, - {file = "pyarrow-18.1.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:2c4dd0c9010a25ba03e198fe743b1cc03cd33c08190afff371749c52ccbbaf76"}, - {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f97b31b4c4e21ff58c6f330235ff893cc81e23da081b1a4b1c982075e0ed4e9"}, - {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a4813cb8ecf1809871fd2d64a8eff740a1bd3691bbe55f01a3cf6c5ec869754"}, - {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:05a5636ec3eb5cc2a36c6edb534a38ef57b2ab127292a716d00eabb887835f1e"}, - {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:73eeed32e724ea3568bb06161cad5fa7751e45bc2228e33dcb10c614044165c7"}, - {file = "pyarrow-18.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:a1880dd6772b685e803011a6b43a230c23b566859a6e0c9a276c1e0faf4f4052"}, - {file = "pyarrow-18.1.0.tar.gz", hash = "sha256:9386d3ca9c145b5539a1cfc75df07757dff870168c959b473a0bccbc3abc8c73"}, + {file = 
"pyarrow-19.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c318eda14f6627966997a7d8c374a87d084a94e4e38e9abbe97395c215830e0c"}, + {file = "pyarrow-19.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:62ef8360ff256e960f57ce0299090fb86423afed5e46f18f1225f960e05aae3d"}, + {file = "pyarrow-19.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2795064647add0f16563e57e3d294dbfc067b723f0fd82ecd80af56dad15f503"}, + {file = "pyarrow-19.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a218670b26fb1bc74796458d97bcab072765f9b524f95b2fccad70158feb8b17"}, + {file = "pyarrow-19.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:66732e39eaa2247996a6b04c8aa33e3503d351831424cdf8d2e9a0582ac54b34"}, + {file = "pyarrow-19.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:e675a3ad4732b92d72e4d24009707e923cab76b0d088e5054914f11a797ebe44"}, + {file = "pyarrow-19.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:f094742275586cdd6b1a03655ccff3b24b2610c3af76f810356c4c71d24a2a6c"}, + {file = "pyarrow-19.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:8e3a839bf36ec03b4315dc924d36dcde5444a50066f1c10f8290293c0427b46a"}, + {file = "pyarrow-19.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:ce42275097512d9e4e4a39aade58ef2b3798a93aa3026566b7892177c266f735"}, + {file = "pyarrow-19.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9348a0137568c45601b031a8d118275069435f151cbb77e6a08a27e8125f59d4"}, + {file = "pyarrow-19.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a0144a712d990d60f7f42b7a31f0acaccf4c1e43e957f7b1ad58150d6f639c1"}, + {file = "pyarrow-19.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2a1a109dfda558eb011e5f6385837daffd920d54ca00669f7a11132d0b1e6042"}, + {file = "pyarrow-19.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:be686bf625aa7b9bada18defb3a3ea3981c1099697239788ff111d87f04cd263"}, + {file 
= "pyarrow-19.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:239ca66d9a05844bdf5af128861af525e14df3c9591bcc05bac25918e650d3a2"}, + {file = "pyarrow-19.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:a7bbe7109ab6198688b7079cbad5a8c22de4d47c4880d8e4847520a83b0d1b68"}, + {file = "pyarrow-19.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:4624c89d6f777c580e8732c27bb8e77fd1433b89707f17c04af7635dd9638351"}, + {file = "pyarrow-19.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b6d3ce4288793350dc2d08d1e184fd70631ea22a4ff9ea5c4ff182130249d9b"}, + {file = "pyarrow-19.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:450a7d27e840e4d9a384b5c77199d489b401529e75a3b7a3799d4cd7957f2f9c"}, + {file = "pyarrow-19.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a08e2a8a039a3f72afb67a6668180f09fddaa38fe0d21f13212b4aba4b5d2451"}, + {file = "pyarrow-19.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f43f5aef2a13d4d56adadae5720d1fed4c1356c993eda8b59dace4b5983843c1"}, + {file = "pyarrow-19.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:2f672f5364b2d7829ef7c94be199bb88bf5661dd485e21d2d37de12ccb78a136"}, + {file = "pyarrow-19.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:cf3bf0ce511b833f7bc5f5bb3127ba731e97222023a444b7359f3a22e2a3b463"}, + {file = "pyarrow-19.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:4d8b0c0de0a73df1f1bf439af1b60f273d719d70648e898bc077547649bb8352"}, + {file = "pyarrow-19.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92aff08e23d281c69835e4a47b80569242a504095ef6a6223c1f6bb8883431d"}, + {file = "pyarrow-19.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3b78eff5968a1889a0f3bc81ca57e1e19b75f664d9c61a42a604bf9d8402aae"}, + {file = "pyarrow-19.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:b34d3bde38eba66190b215bae441646330f8e9da05c29e4b5dd3e41bde701098"}, + {file = 
"pyarrow-19.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:5418d4d0fab3a0ed497bad21d17a7973aad336d66ad4932a3f5f7480d4ca0c04"}, + {file = "pyarrow-19.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:e82c3d5e44e969c217827b780ed8faf7ac4c53f934ae9238872e749fa531f7c9"}, + {file = "pyarrow-19.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:f208c3b58a6df3b239e0bb130e13bc7487ed14f39a9ff357b6415e3f6339b560"}, + {file = "pyarrow-19.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:c751c1c93955b7a84c06794df46f1cec93e18610dcd5ab7d08e89a81df70a849"}, + {file = "pyarrow-19.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b903afaa5df66d50fc38672ad095806443b05f202c792694f3a604ead7c6ea6e"}, + {file = "pyarrow-19.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22a4bc0937856263df8b94f2f2781b33dd7f876f787ed746608e06902d691a5"}, + {file = "pyarrow-19.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:5e8a28b918e2e878c918f6d89137386c06fe577cd08d73a6be8dafb317dc2d73"}, + {file = "pyarrow-19.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:29cd86c8001a94f768f79440bf83fee23963af5e7bc68ce3a7e5f120e17edf89"}, + {file = "pyarrow-19.0.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:c0423393e4a07ff6fea08feb44153302dd261d0551cc3b538ea7a5dc853af43a"}, + {file = "pyarrow-19.0.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:718947fb6d82409013a74b176bf93e0f49ef952d8a2ecd068fecd192a97885b7"}, + {file = "pyarrow-19.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c1c162c4660e0978411a4761f91113dde8da3433683efa473501254563dcbe8"}, + {file = "pyarrow-19.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c73268cf557e688efb60f1ccbc7376f7e18cd8e2acae9e663e98b194c40c1a2d"}, + {file = "pyarrow-19.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:edfe6d3916e915ada9acc4e48f6dafca7efdbad2e6283db6fd9385a1b23055f1"}, + {file = 
"pyarrow-19.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:da410b70a7ab8eb524112f037a7a35da7128b33d484f7671a264a4c224ac131d"}, + {file = "pyarrow-19.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:597360ffc71fc8cceea1aec1fb60cb510571a744fffc87db33d551d5de919bec"}, + {file = "pyarrow-19.0.0.tar.gz", hash = "sha256:8d47c691765cf497aaeed4954d226568563f1b3b74ff61139f2d77876717084b"}, ] [package.extras] @@ -4297,13 +4315,13 @@ files = [ [[package]] name = "pyright" -version = "1.1.391" +version = "1.1.392.post0" description = "Command line wrapper for pyright" optional = true python-versions = ">=3.7" files = [ - {file = "pyright-1.1.391-py3-none-any.whl", hash = "sha256:54fa186f8b3e8a55a44ebfa842636635688670c6896dcf6cf4a7fc75062f4d15"}, - {file = "pyright-1.1.391.tar.gz", hash = "sha256:66b2d42cdf5c3cbab05f2f4b76e8bec8aa78e679bfa0b6ad7b923d9e027cadb2"}, + {file = "pyright-1.1.392.post0-py3-none-any.whl", hash = "sha256:252f84458a46fa2f0fd4e2f91fc74f50b9ca52c757062e93f6c250c0d8329eb2"}, + {file = "pyright-1.1.392.post0.tar.gz", hash = "sha256:3b7f88de74a28dcfa90c7d90c782b6569a48c2be5f9d4add38472bdaac247ebd"}, ] [package.dependencies] @@ -4385,28 +4403,28 @@ pytest = {version = ">=6.2.4", markers = "python_version >= \"3.10\""} [[package]] name = "python-box" -version = "7.3.0" +version = "7.3.1" description = "Advanced Python dictionaries with dot notation access" optional = false python-versions = ">=3.9" files = [ - {file = "python_box-7.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a2131477ed02aa3609b348dad0697b70d84968d6440387898bb9075f461ef9bf"}, - {file = "python_box-7.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3284cf583476af63c4f24168b6e1307503322dccd9b3dc2c924f5e69f79e7ab5"}, - {file = "python_box-7.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:2718cf4c8dcc091d1c56a1a297804ab7973271391a2d2d34d37740820bbd1fda"}, - {file = "python_box-7.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:e40fe08b218b3d07a50d6eb1c62edce8d0636d6bd1e563907bc86018a78e5826"}, - {file = "python_box-7.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd13e2b964ed527e03409cb1fb386d8723e0e69caf0f507af60d64102c13d363"}, - {file = "python_box-7.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:d661fb9c6ff6c730b53fe859754624baa14e37ee3d593525382b20194efad367"}, - {file = "python_box-7.3.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:6c3809f78f7c829e45626990a891d93214748938b9c0236dc6d0f2e8c400d325"}, - {file = "python_box-7.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c233b94bf3b95d7d9dc01ed1ee5636800174345810b319eb87219b760edbb54f"}, - {file = "python_box-7.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:9a22cc82e78225a419c4da02f53d6beb5c5cbd2fe5f63c13dab81e4f27b8c929"}, - {file = "python_box-7.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1f7b93c5ab4027b12ba67baffa8db903557e557250e01b91226d7a1b9688cf77"}, - {file = "python_box-7.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71ed234c1cff7f7197103bb11d98559032c0beac34db0c62dd5bd53e2b2a6963"}, - {file = "python_box-7.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:1144c9e5d40a2cbe34d1ec9a13abfc557e8e9e2fbf15f14314c87b6113de178f"}, - {file = "python_box-7.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:df77730baabf45b1682ead1c470e84a530f8ceb0295263a89f0ebc04ef7f363c"}, - {file = "python_box-7.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36bef944e61672b300c1d56d16db8a43ee4af9ab5678492a5e003368d2c64a6e"}, - {file = "python_box-7.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:b35a2262a4e1ccfba90ce8e2018aa367f8a46a519632884006fa3153b266f184"}, - {file = "python_box-7.3.0-py3-none-any.whl", hash = "sha256:b1139bffe91bd317fd686c4c29ffc84115c1967af14112c5c4a8ac51937d530c"}, - {file = "python_box-7.3.0.tar.gz", hash = 
"sha256:39a85ba457d07122226ca60597882d763549713ab56ac7d55da41c4ad0e89a05"}, + {file = "python_box-7.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fadf589c5d37d5bf40d25f6580d500168f2fc825d2f601c25e753ffc8d4bbec0"}, + {file = "python_box-7.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d375605b159c174b0d60b6acb3586bc47ba75f542b614e96fac2ef899c08add8"}, + {file = "python_box-7.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:f7fef93deb2695716218f513cc43e665f447a85e41cf58219e42e026c570bd67"}, + {file = "python_box-7.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7cdcc0585d5840a04a74e64301d4ec5b0a05bc98a305d0f9516d3e59d265add1"}, + {file = "python_box-7.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aa85d0f1f0ea1ef4af33c0f3a133b8cec8f0ad3bfd6868370833efb8b9f86b3"}, + {file = "python_box-7.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:6fd0463e20a4c990591094fbb0f4e3b39f8212d1faf69648df4ffac10912c49e"}, + {file = "python_box-7.3.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3320d3fa83f006ae44bda02f9ee08647ed709506baf5ae85be3eb045683dd12b"}, + {file = "python_box-7.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6277ef305fb1cc75e903416e0b4f59952675d55e8ae997924f4e2f6e5abf61b"}, + {file = "python_box-7.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:34d409137b41c15322491f353c331069a07d194573e95e56eae07fe101c04cbe"}, + {file = "python_box-7.3.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e5e0c2bf73ab1020fc62f2a7161b8b0e12ee29872292ec33fb8124aa81adb48e"}, + {file = "python_box-7.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fe1e1c705535ec5ab9fa66172cf184a330fd41638aaf638a08e33a12c7c3f71"}, + {file = "python_box-7.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:4fccc0b218937a6254219073f945117978f5222eff1bbae8a35b11c6e9651f5d"}, + {file = "python_box-7.3.1-cp39-cp39-macosx_10_9_universal2.whl", 
hash = "sha256:a48050391cb4d8dcec4b0f8c860b778821ae013a293d49f0cbaeab5548c46829"}, + {file = "python_box-7.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a5bf3264cd4ee9b742aefadb7ff549297dd7eef8826b3a4b922a4a44e9b0751"}, + {file = "python_box-7.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:0ed2024e27d67c5cf1ed1f88d8849aace9234d7a198fd4d5c791ed12e99e7345"}, + {file = "python_box-7.3.1-py3-none-any.whl", hash = "sha256:2d77100d0d5ad67e0d062fac4f77f973851db236f4a445c60b02d0415f83b0d6"}, + {file = "python_box-7.3.1.tar.gz", hash = "sha256:a0bd9dbb4ddd2842f8d0143b8aa0c87d0e82e39093dd4698a5cbbb2d2ac71361"}, ] [package.extras] @@ -4748,18 +4766,19 @@ prompt_toolkit = ">=2.0,<4.0" [[package]] name = "referencing" -version = "0.35.1" +version = "0.36.0" description = "JSON Referencing + Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, - {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, + {file = "referencing-0.36.0-py3-none-any.whl", hash = "sha256:01fc2916bab821aa3284d645bbbb41ba39609e7ff47072416a39ec2fb04d10d9"}, + {file = "referencing-0.36.0.tar.gz", hash = "sha256:246db964bb6101905167895cd66499cfb2aabc5f83277d008c52afe918ef29ba"}, ] [package.dependencies] attrs = ">=22.2.0" rpds-py = ">=0.7.0" +typing-extensions = {version = "*", markers = "python_version < \"3.13\""} [[package]] name = "regex" @@ -5172,53 +5191,72 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.36" +version = "2.0.37" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"}, - {file = 
"SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"}, - {file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, - {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, - {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = 
"sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = "sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, - {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, - {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da36c3b0e891808a7542c5c89f224520b9a16c7f5e4d6a1156955605e54aef0e"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e7402ff96e2b073a98ef6d6142796426d705addd27b9d26c3b32dbaa06d7d069"}, + {file = 
"SQLAlchemy-2.0.37-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6f5d254a22394847245f411a2956976401e84da4288aa70cbcd5190744062c1"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41296bbcaa55ef5fdd32389a35c710133b097f7b2609d8218c0eabded43a1d84"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bedee60385c1c0411378cbd4dc486362f5ee88deceea50002772912d798bb00f"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6c67415258f9f3c69867ec02fea1bf6508153709ecbd731a982442a590f2b7e4"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-win32.whl", hash = "sha256:650dcb70739957a492ad8acff65d099a9586b9b8920e3507ca61ec3ce650bb72"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-win_amd64.whl", hash = "sha256:93d1543cd8359040c02b6614421c8e10cd7a788c40047dbc507ed46c29ae5636"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:78361be6dc9073ed17ab380985d1e45e48a642313ab68ab6afa2457354ff692c"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b661b49d0cb0ab311a189b31e25576b7ac3e20783beb1e1817d72d9d02508bf5"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d57bafbab289e147d064ffbd5cca2d7b1394b63417c0636cea1f2e93d16eb9e8"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fa2c0913f02341d25fb858e4fb2031e6b0813494cca1ba07d417674128ce11b"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9df21b8d9e5c136ea6cde1c50d2b1c29a2b5ff2b1d610165c23ff250e0704087"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:db18ff6b8c0f1917f8b20f8eca35c28bbccb9f83afa94743e03d40203ed83de9"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-win32.whl", hash = 
"sha256:46954173612617a99a64aee103bcd3f078901b9a8dcfc6ae80cbf34ba23df989"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-win_amd64.whl", hash = "sha256:7b7e772dc4bc507fdec4ee20182f15bd60d2a84f1e087a8accf5b5b7a0dcf2ba"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2952748ecd67ed3b56773c185e85fc084f6bdcdec10e5032a7c25a6bc7d682ef"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3151822aa1db0eb5afd65ccfafebe0ef5cda3a7701a279c8d0bf17781a793bb4"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaa8039b6d20137a4e02603aba37d12cd2dde7887500b8855356682fc33933f4"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cdba1f73b64530c47b27118b7053b8447e6d6f3c8104e3ac59f3d40c33aa9fd"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1b2690456528a87234a75d1a1644cdb330a6926f455403c8e4f6cad6921f9098"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cf5ae8a9dcf657fd72144a7fd01f243236ea39e7344e579a121c4205aedf07bb"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-win32.whl", hash = "sha256:ea308cec940905ba008291d93619d92edaf83232ec85fbd514dcb329f3192761"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-win_amd64.whl", hash = "sha256:635d8a21577341dfe4f7fa59ec394b346da12420b86624a69e466d446de16aff"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8c4096727193762e72ce9437e2a86a110cf081241919ce3fab8e89c02f6b6658"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e4fb5ac86d8fe8151966814f6720996430462e633d225497566b3996966b9bdb"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e56a139bfe136a22c438478a86f8204c1eb5eed36f4e15c4224e4b9db01cb3e4"}, + {file = 
"SQLAlchemy-2.0.37-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f95fc8e3f34b5f6b3effb49d10ac97c569ec8e32f985612d9b25dd12d0d2e94"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c505edd429abdfe3643fa3b2e83efb3445a34a9dc49d5f692dd087be966020e0"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:12b0f1ec623cccf058cf21cb544f0e74656618165b083d78145cafde156ea7b6"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-win32.whl", hash = "sha256:293f9ade06b2e68dd03cfb14d49202fac47b7bb94bffcff174568c951fbc7af2"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-win_amd64.whl", hash = "sha256:d70f53a0646cc418ca4853da57cf3ddddbccb8c98406791f24426f2dd77fd0e2"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:44f569d0b1eb82301b92b72085583277316e7367e038d97c3a1a899d9a05e342"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2eae3423e538c10d93ae3e87788c6a84658c3ed6db62e6a61bb9495b0ad16bb"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfff7be361048244c3aa0f60b5e63221c5e0f0e509f4e47b8910e22b57d10ae7"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:5bc3339db84c5fb9130ac0e2f20347ee77b5dd2596ba327ce0d399752f4fce39"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:84b9f23b0fa98a6a4b99d73989350a94e4a4ec476b9a7dfe9b79ba5939f5e80b"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-win32.whl", hash = "sha256:51bc9cfef83e0ac84f86bf2b10eaccb27c5a3e66a1212bef676f5bee6ef33ebb"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-win_amd64.whl", hash = "sha256:8e47f1af09444f87c67b4f1bb6231e12ba6d4d9f03050d7fc88df6d075231a49"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6b788f14c5bb91db7f468dcf76f8b64423660a05e57fe277d3f4fad7b9dcb7ce"}, + {file = 
"SQLAlchemy-2.0.37-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521ef85c04c33009166777c77e76c8a676e2d8528dc83a57836b63ca9c69dcd1"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75311559f5c9881a9808eadbeb20ed8d8ba3f7225bef3afed2000c2a9f4d49b9"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cce918ada64c956b62ca2c2af59b125767097ec1dca89650a6221e887521bfd7"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9d087663b7e1feabea8c578d6887d59bb00388158e8bff3a76be11aa3f748ca2"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:cf95a60b36997dad99692314c4713f141b61c5b0b4cc5c3426faad570b31ca01"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-win32.whl", hash = "sha256:d75ead7dd4d255068ea0f21492ee67937bd7c90964c8f3c2bea83c7b7f81b95f"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-win_amd64.whl", hash = "sha256:74bbd1d0a9bacf34266a7907d43260c8d65d31d691bb2356f41b17c2dca5b1d0"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:648ec5acf95ad59255452ef759054f2176849662af4521db6cb245263ae4aa33"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:35bd2df269de082065d4b23ae08502a47255832cc3f17619a5cea92ce478b02b"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f581d365af9373a738c49e0c51e8b18e08d8a6b1b15cc556773bcd8a192fa8b"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82df02816c14f8dc9f4d74aea4cb84a92f4b0620235daa76dde002409a3fbb5a"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94b564e38b344d3e67d2e224f0aec6ba09a77e4582ced41e7bfd0f757d926ec9"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:955a2a765aa1bd81aafa69ffda179d4fe3e2a3ad462a736ae5b6f387f78bfeb8"}, + {file = 
"SQLAlchemy-2.0.37-cp39-cp39-win32.whl", hash = "sha256:03f0528c53ca0b67094c4764523c1451ea15959bbf0a8a8a3096900014db0278"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-win_amd64.whl", hash = "sha256:4b12885dc85a2ab2b7d00995bac6d967bffa8594123b02ed21e8eb2205a7584b"}, + {file = "SQLAlchemy-2.0.37-py3-none-any.whl", hash = "sha256:a8998bf9f8658bd3839cbc44ddbe982955641863da0c1efe5b00c1ab4f5c16b1"}, + {file = "sqlalchemy-2.0.37.tar.gz", hash = "sha256:12b28d99a9c14eaf4055810df1001557176716de0167b91026e648e65229bffb"}, ] [package.dependencies] -greenlet = {version = "!=0.4.17", optional = true, markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\") or extra == \"asyncio\""} +greenlet = {version = "!=0.4.17", optional = true, markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\") or extra == \"asyncio\""} typing-extensions = ">=4.6.0" [package.extras] @@ -5636,13 +5674,13 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", [[package]] name = "virtualenv" -version = "20.28.1" +version = "20.29.0" description = "Virtual Python Environment builder" optional = true python-versions = ">=3.8" files = [ - {file = "virtualenv-20.28.1-py3-none-any.whl", hash = "sha256:412773c85d4dab0409b83ec36f7a6499e72eaf08c80e81e9576bca61831c71cb"}, - {file = "virtualenv-20.28.1.tar.gz", hash = "sha256:5d34ab240fdb5d21549b76f9e8ff3af28252f5499fb6d6f031adac4e5a8c5329"}, + {file = "virtualenv-20.29.0-py3-none-any.whl", hash = "sha256:c12311863497992dc4b8644f8ea82d3b35bb7ef8ee82e6630d76d0197c39baf9"}, + {file = 
"virtualenv-20.29.0.tar.gz", hash = "sha256:6345e1ff19d4b1296954cee076baaf58ff2a12a84a338c62b02eda39f20aa982"}, ] [package.dependencies] @@ -5795,76 +5833,90 @@ requests = ">=2.0.0,<3.0.0" [[package]] name = "wrapt" -version = "1.17.0" +version = "1.17.2" description = "Module for decorators, wrappers and monkey patching." optional = false python-versions = ">=3.8" files = [ - {file = "wrapt-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a0c23b8319848426f305f9cb0c98a6e32ee68a36264f45948ccf8e7d2b941f8"}, - {file = "wrapt-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1ca5f060e205f72bec57faae5bd817a1560fcfc4af03f414b08fa29106b7e2d"}, - {file = "wrapt-1.17.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e185ec6060e301a7e5f8461c86fb3640a7beb1a0f0208ffde7a65ec4074931df"}, - {file = "wrapt-1.17.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb90765dd91aed05b53cd7a87bd7f5c188fcd95960914bae0d32c5e7f899719d"}, - {file = "wrapt-1.17.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:879591c2b5ab0a7184258274c42a126b74a2c3d5a329df16d69f9cee07bba6ea"}, - {file = "wrapt-1.17.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fce6fee67c318fdfb7f285c29a82d84782ae2579c0e1b385b7f36c6e8074fffb"}, - {file = "wrapt-1.17.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0698d3a86f68abc894d537887b9bbf84d29bcfbc759e23f4644be27acf6da301"}, - {file = "wrapt-1.17.0-cp310-cp310-win32.whl", hash = "sha256:69d093792dc34a9c4c8a70e4973a3361c7a7578e9cd86961b2bbf38ca71e4e22"}, - {file = "wrapt-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:f28b29dc158ca5d6ac396c8e0a2ef45c4e97bb7e65522bfc04c989e6fe814575"}, - {file = "wrapt-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:74bf625b1b4caaa7bad51d9003f8b07a468a704e0644a700e936c357c17dd45a"}, - {file = 
"wrapt-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f2a28eb35cf99d5f5bd12f5dd44a0f41d206db226535b37b0c60e9da162c3ed"}, - {file = "wrapt-1.17.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81b1289e99cf4bad07c23393ab447e5e96db0ab50974a280f7954b071d41b489"}, - {file = "wrapt-1.17.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f2939cd4a2a52ca32bc0b359015718472d7f6de870760342e7ba295be9ebaf9"}, - {file = "wrapt-1.17.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6a9653131bda68a1f029c52157fd81e11f07d485df55410401f745007bd6d339"}, - {file = "wrapt-1.17.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4e4b4385363de9052dac1a67bfb535c376f3d19c238b5f36bddc95efae15e12d"}, - {file = "wrapt-1.17.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bdf62d25234290db1837875d4dceb2151e4ea7f9fff2ed41c0fde23ed542eb5b"}, - {file = "wrapt-1.17.0-cp311-cp311-win32.whl", hash = "sha256:5d8fd17635b262448ab8f99230fe4dac991af1dabdbb92f7a70a6afac8a7e346"}, - {file = "wrapt-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:92a3d214d5e53cb1db8b015f30d544bc9d3f7179a05feb8f16df713cecc2620a"}, - {file = "wrapt-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:89fc28495896097622c3fc238915c79365dd0ede02f9a82ce436b13bd0ab7569"}, - {file = "wrapt-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:875d240fdbdbe9e11f9831901fb8719da0bd4e6131f83aa9f69b96d18fae7504"}, - {file = "wrapt-1.17.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5ed16d95fd142e9c72b6c10b06514ad30e846a0d0917ab406186541fe68b451"}, - {file = "wrapt-1.17.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18b956061b8db634120b58f668592a772e87e2e78bc1f6a906cfcaa0cc7991c1"}, - {file = 
"wrapt-1.17.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:daba396199399ccabafbfc509037ac635a6bc18510ad1add8fd16d4739cdd106"}, - {file = "wrapt-1.17.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4d63f4d446e10ad19ed01188d6c1e1bb134cde8c18b0aa2acfd973d41fcc5ada"}, - {file = "wrapt-1.17.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8a5e7cc39a45fc430af1aefc4d77ee6bad72c5bcdb1322cfde852c15192b8bd4"}, - {file = "wrapt-1.17.0-cp312-cp312-win32.whl", hash = "sha256:0a0a1a1ec28b641f2a3a2c35cbe86c00051c04fffcfcc577ffcdd707df3f8635"}, - {file = "wrapt-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:3c34f6896a01b84bab196f7119770fd8466c8ae3dfa73c59c0bb281e7b588ce7"}, - {file = "wrapt-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:714c12485aa52efbc0fc0ade1e9ab3a70343db82627f90f2ecbc898fdf0bb181"}, - {file = "wrapt-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da427d311782324a376cacb47c1a4adc43f99fd9d996ffc1b3e8529c4074d393"}, - {file = "wrapt-1.17.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba1739fb38441a27a676f4de4123d3e858e494fac05868b7a281c0a383c098f4"}, - {file = "wrapt-1.17.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e711fc1acc7468463bc084d1b68561e40d1eaa135d8c509a65dd534403d83d7b"}, - {file = "wrapt-1.17.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:140ea00c87fafc42739bd74a94a5a9003f8e72c27c47cd4f61d8e05e6dec8721"}, - {file = "wrapt-1.17.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:73a96fd11d2b2e77d623a7f26e004cc31f131a365add1ce1ce9a19e55a1eef90"}, - {file = "wrapt-1.17.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0b48554952f0f387984da81ccfa73b62e52817a4386d070c75e4db7d43a28c4a"}, - {file = "wrapt-1.17.0-cp313-cp313-win32.whl", hash = "sha256:498fec8da10e3e62edd1e7368f4b24aa362ac0ad931e678332d1b209aec93045"}, - {file = 
"wrapt-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd136bb85f4568fffca995bd3c8d52080b1e5b225dbf1c2b17b66b4c5fa02838"}, - {file = "wrapt-1.17.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:17fcf043d0b4724858f25b8826c36e08f9fb2e475410bece0ec44a22d533da9b"}, - {file = "wrapt-1.17.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4a557d97f12813dc5e18dad9fa765ae44ddd56a672bb5de4825527c847d6379"}, - {file = "wrapt-1.17.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0229b247b0fc7dee0d36176cbb79dbaf2a9eb7ecc50ec3121f40ef443155fb1d"}, - {file = "wrapt-1.17.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8425cfce27b8b20c9b89d77fb50e368d8306a90bf2b6eef2cdf5cd5083adf83f"}, - {file = "wrapt-1.17.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9c900108df470060174108012de06d45f514aa4ec21a191e7ab42988ff42a86c"}, - {file = "wrapt-1.17.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:4e547b447073fc0dbfcbff15154c1be8823d10dab4ad401bdb1575e3fdedff1b"}, - {file = "wrapt-1.17.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:914f66f3b6fc7b915d46c1cc424bc2441841083de01b90f9e81109c9759e43ab"}, - {file = "wrapt-1.17.0-cp313-cp313t-win32.whl", hash = "sha256:a4192b45dff127c7d69b3bdfb4d3e47b64179a0b9900b6351859f3001397dabf"}, - {file = "wrapt-1.17.0-cp313-cp313t-win_amd64.whl", hash = "sha256:4f643df3d4419ea3f856c5c3f40fec1d65ea2e89ec812c83f7767c8730f9827a"}, - {file = "wrapt-1.17.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:69c40d4655e078ede067a7095544bcec5a963566e17503e75a3a3e0fe2803b13"}, - {file = "wrapt-1.17.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f495b6754358979379f84534f8dd7a43ff8cff2558dcdea4a148a6e713a758f"}, - {file = "wrapt-1.17.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:baa7ef4e0886a6f482e00d1d5bcd37c201b383f1d314643dfb0367169f94f04c"}, - {file = "wrapt-1.17.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8fc931382e56627ec4acb01e09ce66e5c03c384ca52606111cee50d931a342d"}, - {file = "wrapt-1.17.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8f8909cdb9f1b237786c09a810e24ee5e15ef17019f7cecb207ce205b9b5fcce"}, - {file = "wrapt-1.17.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ad47b095f0bdc5585bced35bd088cbfe4177236c7df9984b3cc46b391cc60627"}, - {file = "wrapt-1.17.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:948a9bd0fb2c5120457b07e59c8d7210cbc8703243225dbd78f4dfc13c8d2d1f"}, - {file = "wrapt-1.17.0-cp38-cp38-win32.whl", hash = "sha256:5ae271862b2142f4bc687bdbfcc942e2473a89999a54231aa1c2c676e28f29ea"}, - {file = "wrapt-1.17.0-cp38-cp38-win_amd64.whl", hash = "sha256:f335579a1b485c834849e9075191c9898e0731af45705c2ebf70e0cd5d58beed"}, - {file = "wrapt-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d751300b94e35b6016d4b1e7d0e7bbc3b5e1751e2405ef908316c2a9024008a1"}, - {file = "wrapt-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7264cbb4a18dc4acfd73b63e4bcfec9c9802614572025bdd44d0721983fc1d9c"}, - {file = "wrapt-1.17.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33539c6f5b96cf0b1105a0ff4cf5db9332e773bb521cc804a90e58dc49b10578"}, - {file = "wrapt-1.17.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c30970bdee1cad6a8da2044febd824ef6dc4cc0b19e39af3085c763fdec7de33"}, - {file = "wrapt-1.17.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bc7f729a72b16ee21795a943f85c6244971724819819a41ddbaeb691b2dd85ad"}, - {file = "wrapt-1.17.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:6ff02a91c4fc9b6a94e1c9c20f62ea06a7e375f42fe57587f004d1078ac86ca9"}, - {file = 
"wrapt-1.17.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2dfb7cff84e72e7bf975b06b4989477873dcf160b2fd89959c629535df53d4e0"}, - {file = "wrapt-1.17.0-cp39-cp39-win32.whl", hash = "sha256:2399408ac33ffd5b200480ee858baa58d77dd30e0dd0cab6a8a9547135f30a88"}, - {file = "wrapt-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:4f763a29ee6a20c529496a20a7bcb16a73de27f5da6a843249c7047daf135977"}, - {file = "wrapt-1.17.0-py3-none-any.whl", hash = "sha256:d2c63b93548eda58abf5188e505ffed0229bf675f7c3090f8e36ad55b8cbc371"}, - {file = "wrapt-1.17.0.tar.gz", hash = "sha256:16187aa2317c731170a88ef35e8937ae0f533c402872c1ee5e6d079fcf320801"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c"}, + {file = 
"wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62"}, + {file = "wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563"}, + {file = "wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72"}, + {file = "wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317"}, + {file = 
"wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9"}, + {file = "wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9"}, + {file = "wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125"}, + {file = 
"wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504"}, + {file = "wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a"}, + {file = "wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0"}, + 
{file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f"}, + {file = "wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555"}, + {file = "wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f"}, + {file = "wrapt-1.17.2-cp38-cp38-win32.whl", hash = "sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7"}, + {file = "wrapt-1.17.2-cp38-cp38-win_amd64.whl", hash = "sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b"}, + {file = 
"wrapt-1.17.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9"}, + {file = "wrapt-1.17.2-cp39-cp39-win32.whl", hash = "sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb"}, + {file = "wrapt-1.17.2-cp39-cp39-win_amd64.whl", hash = "sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb"}, + {file = "wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8"}, + {file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"}, ] [[package]] @@ -6199,4 +6251,4 @@ tests = ["wikipedia"] [metadata] lock-version = "2.0" python-versions = "<3.14,>=3.10" -content-hash = "5794be1ead80f75ba6daa9deb27d9f0e5908686ac72eb7833077d8199b972919" +content-hash = "754d922b20713a9219ef3465aebf8f435d608be996dd55fe48968fa6c3fa7d4d" diff --git a/pyproject.toml b/pyproject.toml index 146a936d67..0755b02255 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,8 +59,8 @@ nltk = "^3.8.1" jinja2 = "^3.1.4" locust = {version = "^2.31.5", optional = true} wikipedia = {version = "^1.4.0", optional = true} -composio-langchain = "^0.6.7" -composio-core = "^0.6.7" +composio-langchain = "^0.6.15" +composio-core = "^0.6.15" alembic = "^1.13.3" pyhumps = "^3.8.0" psycopg2 = {version = "^2.9.10", optional = true} @@ -76,6 +76,7 @@ grpcio-tools = "^1.68.1" llama-index = "^0.12.2" llama-index-embeddings-openai = "^0.3.1" e2b-code-interpreter = {version = "^1.0.3", optional = true} +letta_client = "^0.1.15" [tool.poetry.extras] postgres = ["pgvector", "pg8000", "psycopg2-binary", "psycopg2"] diff --git 
a/tests/configs/llm_model_configs/letta-hosted.json b/tests/configs/llm_model_configs/letta-hosted.json index a0367c469d..82ece9e4dc 100644 --- a/tests/configs/llm_model_configs/letta-hosted.json +++ b/tests/configs/llm_model_configs/letta-hosted.json @@ -1,7 +1,7 @@ { - "context_window": 16384, - "model_endpoint_type": "openai", - "model_endpoint": "https://inference.memgpt.ai", - "model": "memgpt-openai", - "put_inner_thoughts_in_kwargs": true + "context_window": 8192, + "model_endpoint_type": "openai", + "model_endpoint": "https://inference.memgpt.ai", + "model": "memgpt-openai", + "put_inner_thoughts_in_kwargs": true } diff --git a/tests/helpers/utils.py b/tests/helpers/utils.py index a1f1382030..765c46125c 100644 --- a/tests/helpers/utils.py +++ b/tests/helpers/utils.py @@ -1,3 +1,5 @@ +import functools +import time from typing import Union from letta import LocalClient, RESTClient @@ -8,6 +10,68 @@ from letta.schemas.user import User as PydanticUser +def retry_until_threshold(threshold=0.5, max_attempts=10, sleep_time_seconds=4): + """ + Decorator to retry a test until a failure threshold is crossed. + + :param threshold: Expected passing rate (e.g., 0.5 means 50% success rate expected). + :param max_attempts: Maximum number of attempts to retry the test. + """ + + def decorator_retry(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + success_count = 0 + failure_count = 0 + + for attempt in range(max_attempts): + try: + func(*args, **kwargs) + success_count += 1 + except Exception as e: + failure_count += 1 + print(f"\033[93mAn attempt failed with error:\n{e}\033[0m") + + time.sleep(sleep_time_seconds) + + rate = success_count / max_attempts + if rate >= threshold: + print(f"Test met expected passing rate of {threshold:.2f}. Actual rate: {success_count}/{max_attempts}") + else: + raise AssertionError( + f"Test did not meet expected passing rate of {threshold:.2f}. 
Actual rate: {success_count}/{max_attempts}" + ) + + return wrapper + + return decorator_retry + + +def retry_until_success(max_attempts=10, sleep_time_seconds=4): + """ + Decorator to retry a function until it succeeds or the maximum number of attempts is reached. + + :param max_attempts: Maximum number of attempts to retry the function. + :param sleep_time_seconds: Time to wait between attempts, in seconds. + """ + + def decorator_retry(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + for attempt in range(1, max_attempts + 1): + try: + return func(*args, **kwargs) + except Exception as e: + print(f"\033[93mAttempt {attempt} failed with error:\n{e}\033[0m") + if attempt == max_attempts: + raise + time.sleep(sleep_time_seconds) + + return wrapper + + return decorator_retry + + def cleanup(client: Union[LocalClient, RESTClient], agent_uuid: str): # Clear all agents for agent_state in client.list_agents(): diff --git a/tests/integration_test_agent_tool_graph.py b/tests/integration_test_agent_tool_graph.py index 654d4a9e91..61db6dfbd6 100644 --- a/tests/integration_test_agent_tool_graph.py +++ b/tests/integration_test_agent_tool_graph.py @@ -604,3 +604,59 @@ def test_agent_reload_remembers_function_response(mock_e2b_api_key_none): print(f"Got successful response from client: \n\n{response}") cleanup(client=client, agent_uuid=agent_uuid) + + +@pytest.mark.timeout(60) # Sets a 60-second timeout for the test since this could loop infinitely +def test_simple_tool_rule(mock_e2b_api_key_none): + """ + Test a simple tool rule where fourth_secret_word must be called after flip_coin. 
+ + Tool Flow: + flip_coin + | + v + fourth_secret_word + """ + client = create_client() + cleanup(client=client, agent_uuid=agent_uuid) + + # Create tools + flip_coin_name = "flip_coin" + another_secret_word = "first_secret_word" + secret_word = "fourth_secret_word" + random_tool = "can_play_game" + flip_coin_tool = client.create_or_update_tool(flip_coin, name=flip_coin_name) + secret_word_tool = client.create_or_update_tool(fourth_secret_word, name=secret_word) + another_secret_word_tool = client.create_or_update_tool(first_secret_word, name=another_secret_word) + random_tool = client.create_or_update_tool(can_play_game, name=random_tool) + tools = [flip_coin_tool, secret_word_tool, another_secret_word_tool, random_tool] + + # Create tool rule: after flip_coin, must call fourth_secret_word + tool_rule = ConditionalToolRule( + tool_name=flip_coin_name, + default_child=secret_word, + child_output_mapping={"*": secret_word}, + ) + + # Set up agent with the tool rule + agent_state = setup_agent( + client, config_file, agent_uuid, tool_rules=[tool_rule], tool_ids=[t.id for t in tools], include_base_tools=False + ) + + # Start conversation + response = client.user_message(agent_id=agent_state.id, message="Help me test the tools.") + + # Verify the tool calls + tool_calls = [msg for msg in response.messages if isinstance(msg, ToolCallMessage)] + assert len(tool_calls) >= 2 # Should have at least flip_coin and fourth_secret_word calls + assert_invoked_function_call(response.messages, flip_coin_name) + assert_invoked_function_call(response.messages, secret_word) + + # Find the flip_coin call + flip_coin_call = next((call for call in tool_calls if call.tool_call.name == "flip_coin"), None) + + # Verify that fourth_secret_word was called after flip_coin + flip_coin_call_index = tool_calls.index(flip_coin_call) + assert tool_calls[flip_coin_call_index + 1].tool_call.name == secret_word, "Fourth secret word should be called after flip_coin" + + cleanup(client, 
agent_uuid=agent_state.id) diff --git a/tests/test_base_functions.py b/tests/test_base_functions.py index 5b5bec6f4f..8736825b1f 100644 --- a/tests/test_base_functions.py +++ b/tests/test_base_functions.py @@ -1,9 +1,15 @@ +import json +import secrets +import string + import pytest import letta.functions.function_sets.base as base_functions from letta import LocalClient, create_client from letta.schemas.embedding_config import EmbeddingConfig +from letta.schemas.letta_message import ToolReturnMessage from letta.schemas.llm_config import LLMConfig +from tests.helpers.utils import retry_until_success @pytest.fixture(scope="module") @@ -18,7 +24,7 @@ def client(): @pytest.fixture(scope="module") def agent_obj(client: LocalClient): """Create a test agent that we can call functions on""" - agent_state = client.create_agent() + agent_state = client.create_agent(include_multi_agent_tools=True) agent_obj = client.server.load_agent(agent_id=agent_state.id, actor=client.user) yield agent_obj @@ -26,6 +32,17 @@ def agent_obj(client: LocalClient): client.delete_agent(agent_obj.agent_state.id) +@pytest.fixture(scope="module") +def other_agent_obj(client: LocalClient): + """Create another test agent that we can call functions on""" + agent_state = client.create_agent(include_multi_agent_tools=False) + + other_agent_obj = client.server.load_agent(agent_id=agent_state.id, actor=client.user) + yield other_agent_obj + + client.delete_agent(other_agent_obj.agent_state.id) + + def query_in_search_results(search_results, query): for result in search_results: if query.lower() in result["content"].lower(): @@ -97,3 +114,101 @@ def test_recall(client, agent_obj): # Conversation search result = base_functions.conversation_search(agent_obj, "banana") assert keyword in result + + +# This test is nondeterministic, so we retry until we get the perfect behavior from the LLM +@retry_until_success(max_attempts=5, sleep_time_seconds=2) +def test_send_message_to_agent(client, agent_obj, 
other_agent_obj): + long_random_string = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(10)) + + # Encourage the agent to send a message to the other agent_obj with the secret string + client.send_message( + agent_id=agent_obj.agent_state.id, + role="user", + message=f"Use your tool to send a message to another agent with id {other_agent_obj.agent_state.id} with the secret password={long_random_string}", + ) + + # Conversation search the other agent + result = base_functions.conversation_search(other_agent_obj, long_random_string) + assert long_random_string in result + + # Search the sender agent for the response from another agent + in_context_messages = agent_obj.agent_manager.get_in_context_messages(agent_id=agent_obj.agent_state.id, actor=agent_obj.user) + found = False + target_snippet = f"Agent {other_agent_obj.agent_state.id} said " + + for m in in_context_messages: + if target_snippet in m.text: + found = True + break + + print(f"In context messages of the sender agent (without system):\n\n{"\n".join([m.text for m in in_context_messages[1:]])}") + if not found: + pytest.fail(f"Was not able to find an instance of the target snippet: {target_snippet}") + + # Test that the agent can still receive messages fine + response = client.send_message(agent_id=agent_obj.agent_state.id, role="user", message="So what did the other agent say?") + print(response.messages) + + +# This test is nondeterministic, so we retry until we get the perfect behavior from the LLM +@retry_until_success(max_attempts=5, sleep_time_seconds=2) +def test_send_message_to_agents_with_tags(client): + worker_tags = ["worker", "user-456"] + + # Clean up first from possibly failed tests + prev_worker_agents = client.server.agent_manager.list_agents(client.user, tags=worker_tags, match_all_tags=True) + for agent in prev_worker_agents: + client.delete_agent(agent.id) + + long_random_string = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in 
range(10)) + + # Create "manager" agent + manager_agent_state = client.create_agent(include_multi_agent_tools=True) + manager_agent = client.server.load_agent(agent_id=manager_agent_state.id, actor=client.user) + + # Create 3 worker agents + worker_agents = [] + worker_tags = ["worker", "user-123"] + for _ in range(3): + worker_agent_state = client.create_agent(include_multi_agent_tools=False, tags=worker_tags) + worker_agent = client.server.load_agent(agent_id=worker_agent_state.id, actor=client.user) + worker_agents.append(worker_agent) + + # Create 2 worker agents that belong to a different user (These should NOT get the message) + worker_agents = [] + worker_tags = ["worker", "user-456"] + for _ in range(3): + worker_agent_state = client.create_agent(include_multi_agent_tools=False, tags=worker_tags) + worker_agent = client.server.load_agent(agent_id=worker_agent_state.id, actor=client.user) + worker_agents.append(worker_agent) + + # Encourage the manager to send a message to the other agent_obj with the secret string + response = client.send_message( + agent_id=manager_agent.agent_state.id, + role="user", + message=f"Send a message to all agents with tags {worker_tags} informing them of the secret password={long_random_string}", + ) + + for m in response.messages: + if isinstance(m, ToolReturnMessage): + tool_response = eval(json.loads(m.tool_return)["message"]) + print(f"\n\nManager agent tool response: \n{tool_response}\n\n") + assert len(tool_response) == len(worker_agents) + + # We can break after this, the ToolReturnMessage after is not related + break + + # Conversation search the worker agents + for agent in worker_agents: + result = base_functions.conversation_search(agent, long_random_string) + assert long_random_string in result + + # Test that the agent can still receive messages fine + response = client.send_message(agent_id=manager_agent.agent_state.id, role="user", message="So what did the other agents say?") + print("Manager agent followup 
message: \n\n" + "\n".join([str(m) for m in response.messages])) + + # Clean up agents + client.delete_agent(manager_agent_state.id) + for agent in worker_agents: + client.delete_agent(agent.agent_state.id) diff --git a/tests/test_client.py b/tests/test_client.py index a56d449f1e..7ecc89e4c7 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -15,6 +15,7 @@ from letta.schemas.agent import AgentState from letta.schemas.block import CreateBlock from letta.schemas.embedding_config import EmbeddingConfig +from letta.schemas.enums import MessageRole from letta.schemas.job import JobStatus from letta.schemas.letta_message import ToolReturnMessage from letta.schemas.llm_config import LLMConfig @@ -44,7 +45,8 @@ def run_server(): @pytest.fixture( params=[ {"server": False}, - ], # {"server": True}], # whether to use REST API server + {"server": True}, + ], # whether to use REST API server # params=[{"server": False}], # whether to use REST API server scope="module", ) @@ -78,6 +80,28 @@ def agent(client: Union[LocalClient, RESTClient]): client.delete_agent(agent_state.id) +# Fixture for test agent +@pytest.fixture +def search_agent_one(client: Union[LocalClient, RESTClient]): + agent_state = client.create_agent(name="Search Agent One") + + yield agent_state + + # delete agent + client.delete_agent(agent_state.id) + + +# Fixture for test agent +@pytest.fixture +def search_agent_two(client: Union[LocalClient, RESTClient]): + agent_state = client.create_agent(name="Search Agent Two") + + yield agent_state + + # delete agent + client.delete_agent(agent_state.id) + + @pytest.fixture(autouse=True) def clear_tables(): """Clear the sandbox tables before each test.""" @@ -222,6 +246,66 @@ def test_add_and_manage_tags_for_agent(client: Union[LocalClient, RESTClient]): client.delete_agent(agent.id) +def test_agent_tags(client: Union[LocalClient, RESTClient]): + """Test creating agents with tags and retrieving tags via the API.""" + if not isinstance(client, RESTClient): + 
pytest.skip("This test only runs when the server is enabled") + + # Create multiple agents with different tags + agent1 = client.create_agent( + name=f"test_agent_{str(uuid.uuid4())}", + llm_config=LLMConfig.default_config("gpt-4"), + embedding_config=EmbeddingConfig.default_config(provider="openai"), + tags=["test", "agent1", "production"], + ) + + agent2 = client.create_agent( + name=f"test_agent_{str(uuid.uuid4())}", + llm_config=LLMConfig.default_config("gpt-4"), + embedding_config=EmbeddingConfig.default_config(provider="openai"), + tags=["test", "agent2", "development"], + ) + + agent3 = client.create_agent( + name=f"test_agent_{str(uuid.uuid4())}", + llm_config=LLMConfig.default_config("gpt-4"), + embedding_config=EmbeddingConfig.default_config(provider="openai"), + tags=["test", "agent3", "production"], + ) + + # Test getting all tags + all_tags = client.get_tags() + expected_tags = ["agent1", "agent2", "agent3", "development", "production", "test"] + assert sorted(all_tags) == expected_tags + + # Test pagination + paginated_tags = client.get_tags(limit=2) + assert len(paginated_tags) == 2 + assert paginated_tags[0] == "agent1" + assert paginated_tags[1] == "agent2" + + # Test pagination with cursor + next_page_tags = client.get_tags(cursor="agent2", limit=2) + assert len(next_page_tags) == 2 + assert next_page_tags[0] == "agent3" + assert next_page_tags[1] == "development" + + # Test text search + prod_tags = client.get_tags(query_text="prod") + assert sorted(prod_tags) == ["production"] + + dev_tags = client.get_tags(query_text="dev") + assert sorted(dev_tags) == ["development"] + + agent_tags = client.get_tags(query_text="agent") + assert sorted(agent_tags) == ["agent1", "agent2", "agent3"] + + # Remove agents + client.delete_agent(agent1.id) + client.delete_agent(agent2.id) + client.delete_agent(agent3.id) + + def test_update_agent_memory_label(client: Union[LocalClient, RESTClient], agent: AgentState): """Test that we can update the label of a block in 
an agent's memory""" @@ -296,7 +380,6 @@ def test_add_remove_agent_memory_block(client: Union[LocalClient, RESTClient], a # # TODO we should probably not allow updating the core memory limit if # # TODO in which case we should modify this test to actually to a proper token counter check - # finally: # client.delete_agent(new_agent.id) @@ -453,25 +536,145 @@ async def send_message_task(message: str): def test_send_message_async(client: Union[LocalClient, RESTClient], agent: AgentState): - """Test that we can send a message asynchronously""" + """ + Test that we can send a message asynchronously and retrieve the messages, along with usage statistics + """ if not isinstance(client, RESTClient): pytest.skip("send_message_async is only supported by the RESTClient") print("Sending message asynchronously") - job = client.send_message_async(agent_id=agent.id, role="user", message="This is a test message, no need to respond.") - assert job.id is not None - assert job.status == JobStatus.created - print(f"Job created, job={job}, status={job.status}") + test_message = "This is a test message, respond to the user with a sentence." 
+ run = client.send_message_async(agent_id=agent.id, role="user", message=test_message) + assert run.id is not None + assert run.status == JobStatus.created + print(f"Run created, run={run}, status={run.status}") # Wait for the job to complete, cancel it if takes over 10 seconds start_time = time.time() - while job.status == JobStatus.created: + while run.status == JobStatus.created: time.sleep(1) - job = client.get_job(job_id=job.id) - print(f"Job status: {job.status}") + run = client.get_run(run_id=run.id) + print(f"Run status: {run.status}") if time.time() - start_time > 10: - pytest.fail("Job took too long to complete") + pytest.fail("Run took too long to complete") + + print(f"Run completed in {time.time() - start_time} seconds, run={run}") + assert run.status == JobStatus.completed + + # Get messages for the job + messages = client.get_run_messages(run_id=run.id) + assert len(messages) >= 2 # At least assistant response + + # Check filters + assistant_messages = client.get_run_messages(run_id=run.id, role=MessageRole.assistant) + assert len(assistant_messages) > 0 + tool_messages = client.get_run_messages(run_id=run.id, role=MessageRole.tool) + assert len(tool_messages) > 0 + + # Get and verify usage statistics + usage = client.get_run_usage(run_id=run.id)[0] + assert usage.completion_tokens >= 0 + assert usage.prompt_tokens >= 0 + assert usage.total_tokens >= 0 + assert usage.total_tokens == usage.completion_tokens + usage.prompt_tokens + + +# ========================================== +# TESTS FOR AGENT LISTING +# ========================================== + + +def test_agent_listing(client: Union[LocalClient, RESTClient], agent, search_agent_one, search_agent_two): + """Test listing agents with pagination and query text filtering.""" + # Test query text filtering + search_results = client.list_agents(query_text="search agent") + assert len(search_results) == 2 + search_agent_ids = {agent.id for agent in search_results} + assert search_agent_one.id in 
search_agent_ids + assert search_agent_two.id in search_agent_ids + assert agent.id not in search_agent_ids + + different_results = client.list_agents(query_text="client") + assert len(different_results) == 1 + assert different_results[0].id == agent.id + + # Test pagination + first_page = client.list_agents(query_text="search agent", limit=1) + assert len(first_page) == 1 + first_agent = first_page[0] + + second_page = client.list_agents(query_text="search agent", cursor=first_agent.id, limit=1) # Use agent ID as cursor + assert len(second_page) == 1 + assert second_page[0].id != first_agent.id + + # Verify we got both search agents with no duplicates + all_ids = {first_page[0].id, second_page[0].id} + assert len(all_ids) == 2 + assert all_ids == {search_agent_one.id, search_agent_two.id} + + # Test listing without any filters + all_agents = client.list_agents() + assert len(all_agents) == 3 + assert all(agent.id in {a.id for a in all_agents} for agent in [search_agent_one, search_agent_two, agent]) + + +def test_agent_creation(client: Union[LocalClient, RESTClient]): + """Test that block IDs are properly attached when creating an agent.""" + if not isinstance(client, RESTClient): + pytest.skip("This test only runs when the server is enabled") + + from letta import BasicBlockMemory + + # Create a test block that will represent user preferences + user_preferences_block = client.create_block(label="user_preferences", value="", limit=10000) + + # Create test tools + def test_tool(): + """A simple test tool.""" + return "Hello from test tool!" + + def another_test_tool(): + """Another test tool.""" + return "Hello from another test tool!" 
+ + tool1 = client.create_or_update_tool(func=test_tool, name="test_tool", tags=["test"]) + tool2 = client.create_or_update_tool(func=another_test_tool, name="another_test_tool", tags=["test"]) + + # Create test blocks + offline_persona_block = client.create_block(label="persona", value="persona description", limit=5000) + mindy_block = client.create_block(label="mindy", value="Mindy is a helpful assistant", limit=5000) + memory_blocks = BasicBlockMemory(blocks=[offline_persona_block, mindy_block]) + + # Create agent with the blocks and tools + agent = client.create_agent( + name=f"test_agent_{str(uuid.uuid4())}", + memory=memory_blocks, + llm_config=LLMConfig.default_config("gpt-4"), + embedding_config=EmbeddingConfig.default_config(provider="openai"), + tool_ids=[tool1.id, tool2.id], + include_base_tools=False, + tags=["test"], + block_ids=[user_preferences_block.id], + ) - print(f"Job completed in {time.time() - start_time} seconds, job={job}") - assert job.status == JobStatus.completed + # Verify the agent was created successfully + assert agent is not None + assert agent.id is not None + + # Verify the blocks are properly attached + agent_blocks = client.get_agent_memory_blocks(agent.id) + agent_block_ids = {block.id for block in agent_blocks} + + # Check that all memory blocks are present + memory_block_ids = {block.id for block in memory_blocks.blocks} + for block_id in memory_block_ids | {user_preferences_block.id}: + assert block_id in agent_block_ids + + # Verify the tools are properly attached + agent_tools = client.get_tools_from_agent(agent.id) + assert len(agent_tools) == 2 + tool_ids = {tool1.id, tool2.id} + assert all(tool.id in tool_ids for tool in agent_tools) + + client.delete_agent(agent_id=agent.id) diff --git a/tests/test_client_legacy.py b/tests/test_client_legacy.py index 202adf17f5..51fe6d54a2 100644 --- a/tests/test_client_legacy.py +++ b/tests/test_client_legacy.py @@ -11,7 +11,7 @@ from letta import create_client from letta.client.client 
import LocalClient, RESTClient -from letta.constants import BASE_MEMORY_TOOLS, BASE_TOOLS, DEFAULT_PRESET +from letta.constants import BASE_MEMORY_TOOLS, BASE_TOOLS, DEFAULT_PRESET, MULTI_AGENT_TOOLS from letta.orm import FileMetadata, Source from letta.schemas.agent import AgentState from letta.schemas.embedding_config import EmbeddingConfig @@ -339,7 +339,7 @@ def test_list_tools_pagination(client: Union[LocalClient, RESTClient]): def test_list_tools(client: Union[LocalClient, RESTClient]): tools = client.upsert_base_tools() tool_names = [t.name for t in tools] - expected = BASE_TOOLS + BASE_MEMORY_TOOLS + expected = BASE_TOOLS + BASE_MEMORY_TOOLS + MULTI_AGENT_TOOLS assert sorted(tool_names) == sorted(expected) diff --git a/tests/test_managers.py b/tests/test_managers.py index 8fa53ba2be..efe736f6b6 100644 --- a/tests/test_managers.py +++ b/tests/test_managers.py @@ -7,7 +7,7 @@ from sqlalchemy.exc import IntegrityError from letta.config import LettaConfig -from letta.constants import BASE_MEMORY_TOOLS, BASE_TOOLS +from letta.constants import BASE_MEMORY_TOOLS, BASE_TOOLS, MULTI_AGENT_TOOLS from letta.embeddings import embedding_model from letta.functions.functions import derive_openai_json_schema, parse_source_code from letta.orm import ( @@ -17,6 +17,7 @@ BlocksAgents, FileMetadata, Job, + JobMessage, Message, Organization, Provider, @@ -30,7 +31,7 @@ User, ) from letta.orm.agents_tags import AgentsTags -from letta.orm.enums import ToolType +from letta.orm.enums import JobType, ToolType from letta.orm.errors import NoResultFound, UniqueConstraintViolationError from letta.schemas.agent import CreateAgent, UpdateAgent from letta.schemas.block import Block as PydanticBlock @@ -41,17 +42,21 @@ from letta.schemas.file import FileMetadata as PydanticFileMetadata from letta.schemas.job import Job as PydanticJob from letta.schemas.job import JobUpdate +from letta.schemas.letta_request import LettaRequestConfig from letta.schemas.llm_config import LLMConfig from 
letta.schemas.message import Message as PydanticMessage from letta.schemas.message import MessageCreate, MessageUpdate +from letta.schemas.openai.chat_completions import ToolCall, ToolCallFunction from letta.schemas.organization import Organization as PydanticOrganization from letta.schemas.passage import Passage as PydanticPassage +from letta.schemas.run import Run as PydanticRun from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate, SandboxType from letta.schemas.source import Source as PydanticSource from letta.schemas.source import SourceUpdate from letta.schemas.tool import Tool as PydanticTool from letta.schemas.tool import ToolUpdate from letta.schemas.tool_rule import InitToolRule +from letta.schemas.usage import LettaUsageStatistics from letta.schemas.user import User as PydanticUser from letta.schemas.user import UserUpdate from letta.server.server import SyncServer @@ -81,6 +86,7 @@ def clear_tables(server: SyncServer): session.execute(delete(Message)) session.execute(delete(AgentPassage)) session.execute(delete(SourcePassage)) + session.execute(delete(JobMessage)) # Clear JobMessage first session.execute(delete(Job)) session.execute(delete(ToolsAgents)) # Clear ToolsAgents first session.execute(delete(BlocksAgents)) @@ -187,6 +193,28 @@ def print_tool(message: str): yield tool +@pytest.fixture +def default_job(server: SyncServer, default_user): + """Fixture to create and return a default job.""" + job_pydantic = PydanticJob( + user_id=default_user.id, + status=JobStatus.pending, + ) + job = server.job_manager.create_job(pydantic_job=job_pydantic, actor=default_user) + yield job + + +@pytest.fixture +def default_run(server: SyncServer, default_user): + """Fixture to create and return a default job.""" + run_pydantic = PydanticRun( + user_id=default_user.id, + status=JobStatus.pending, + ) + run = server.job_manager.create_job(pydantic_job=run_pydantic, actor=default_user) + yield run + + 
@pytest.fixture def agent_passage_fixture(server: SyncServer, default_user, sarah_agent): """Fixture to create an agent passage.""" @@ -887,6 +915,175 @@ def test_list_agents_by_tags_pagination(server: SyncServer, default_user, defaul assert agent2.id in all_ids +def test_list_agents_query_text_pagination(server: SyncServer, default_user, default_organization): + """Test listing agents with query text filtering and pagination.""" + # Create test agents with specific names and descriptions + agent1 = server.agent_manager.create_agent( + agent_create=CreateAgent( + name="Search Agent One", + memory_blocks=[], + description="This is a search agent for testing", + llm_config=LLMConfig.default_config("gpt-4"), + embedding_config=EmbeddingConfig.default_config(provider="openai"), + ), + actor=default_user, + ) + + agent2 = server.agent_manager.create_agent( + agent_create=CreateAgent( + name="Search Agent Two", + memory_blocks=[], + description="Another search agent for testing", + llm_config=LLMConfig.default_config("gpt-4"), + embedding_config=EmbeddingConfig.default_config(provider="openai"), + ), + actor=default_user, + ) + + agent3 = server.agent_manager.create_agent( + agent_create=CreateAgent( + name="Different Agent", + memory_blocks=[], + description="This is a different agent", + llm_config=LLMConfig.default_config("gpt-4"), + embedding_config=EmbeddingConfig.default_config(provider="openai"), + ), + actor=default_user, + ) + + # Test query text filtering + search_results = server.agent_manager.list_agents(actor=default_user, query_text="search agent") + assert len(search_results) == 2 + search_agent_ids = {agent.id for agent in search_results} + assert agent1.id in search_agent_ids + assert agent2.id in search_agent_ids + assert agent3.id not in search_agent_ids + + different_results = server.agent_manager.list_agents(actor=default_user, query_text="different agent") + assert len(different_results) == 1 + assert different_results[0].id == agent3.id + + # Test 
pagination with query text + first_page = server.agent_manager.list_agents(actor=default_user, query_text="search agent", limit=1) + assert len(first_page) == 1 + first_agent_id = first_page[0].id + + # Get second page using cursor + second_page = server.agent_manager.list_agents(actor=default_user, query_text="search agent", cursor=first_agent_id, limit=1) + assert len(second_page) == 1 + assert second_page[0].id != first_agent_id + + # Verify we got both search agents with no duplicates + all_ids = {first_page[0].id, second_page[0].id} + assert len(all_ids) == 2 + assert all_ids == {agent1.id, agent2.id} + + +# ====================================================================================================================== +# AgentManager Tests - Messages Relationship +# ====================================================================================================================== + + +def test_reset_messages_no_messages(server: SyncServer, sarah_agent, default_user): + """ + Test that resetting messages on an agent that has zero messages + does not fail and clears out message_ids if somehow it's non-empty. + """ + # Force a weird scenario: Suppose the message_ids field was set non-empty (without actual messages). 
+ server.agent_manager.update_agent(sarah_agent.id, UpdateAgent(message_ids=["ghost-message-id"]), actor=default_user) + updated_agent = server.agent_manager.get_agent_by_id(sarah_agent.id, default_user) + assert updated_agent.message_ids == ["ghost-message-id"] + + # Reset messages + reset_agent = server.agent_manager.reset_messages(agent_id=sarah_agent.id, actor=default_user) + assert len(reset_agent.message_ids) == 1 + # Double check that physically no messages exist + assert server.message_manager.size(agent_id=sarah_agent.id, actor=default_user) == 1 + + +def test_reset_messages_default_messages(server: SyncServer, sarah_agent, default_user): + """ + Test that resetting messages on an agent that has zero messages + does not fail and clears out message_ids if somehow it's non-empty. + """ + # Force a weird scenario: Suppose the message_ids field was set non-empty (without actual messages). + server.agent_manager.update_agent(sarah_agent.id, UpdateAgent(message_ids=["ghost-message-id"]), actor=default_user) + updated_agent = server.agent_manager.get_agent_by_id(sarah_agent.id, default_user) + assert updated_agent.message_ids == ["ghost-message-id"] + + # Reset messages + reset_agent = server.agent_manager.reset_messages(agent_id=sarah_agent.id, actor=default_user, add_default_initial_messages=True) + assert len(reset_agent.message_ids) == 4 + # Double check that physically no messages exist + assert server.message_manager.size(agent_id=sarah_agent.id, actor=default_user) == 4 + + +def test_reset_messages_with_existing_messages(server: SyncServer, sarah_agent, default_user): + """ + Test that resetting messages on an agent with actual messages + deletes them from the database and clears message_ids. + """ + # 1. 
Create multiple messages for the agent + msg1 = server.message_manager.create_message( + PydanticMessage( + agent_id=sarah_agent.id, + organization_id=default_user.organization_id, + role="user", + text="Hello, Sarah!", + ), + actor=default_user, + ) + msg2 = server.message_manager.create_message( + PydanticMessage( + agent_id=sarah_agent.id, + organization_id=default_user.organization_id, + role="assistant", + text="Hello, user!", + ), + actor=default_user, + ) + + # Verify the messages were created + agent_before = server.agent_manager.get_agent_by_id(sarah_agent.id, default_user) + # This is 4 because creating the message does not necessarily add it to the in context message ids + assert len(agent_before.message_ids) == 4 + assert server.message_manager.size(agent_id=sarah_agent.id, actor=default_user) == 6 + + # 2. Reset all messages + reset_agent = server.agent_manager.reset_messages(agent_id=sarah_agent.id, actor=default_user) + + # 3. Verify the agent now has zero message_ids + assert len(reset_agent.message_ids) == 1 + + # 4. Verify the messages are physically removed + assert server.message_manager.size(agent_id=sarah_agent.id, actor=default_user) == 1 + + +def test_reset_messages_idempotency(server: SyncServer, sarah_agent, default_user): + """ + Test that calling reset_messages multiple times has no adverse effect. 
+ """ + # Create a single message + server.message_manager.create_message( + PydanticMessage( + agent_id=sarah_agent.id, + organization_id=default_user.organization_id, + role="user", + text="Hello, Sarah!", + ), + actor=default_user, + ) + # First reset + reset_agent = server.agent_manager.reset_messages(agent_id=sarah_agent.id, actor=default_user) + assert len(reset_agent.message_ids) == 1 + assert server.message_manager.size(agent_id=sarah_agent.id, actor=default_user) == 1 + + # Second reset should do nothing new + reset_agent_again = server.agent_manager.reset_messages(agent_id=sarah_agent.id, actor=default_user) + assert len(reset_agent.message_ids) == 1 + assert server.message_manager.size(agent_id=sarah_agent.id, actor=default_user) == 1 + + # ====================================================================================================================== # AgentManager Tests - Blocks Relationship # ====================================================================================================================== @@ -1343,6 +1540,8 @@ def test_update_user(server: SyncServer): # ====================================================================================================================== # ToolManager Tests # ====================================================================================================================== + + def test_create_tool(server: SyncServer, print_tool, default_user, default_organization): # Assertions to ensure the created tool matches the expected values assert print_tool.created_by_id == default_user.id @@ -1517,7 +1716,7 @@ def test_delete_tool_by_id(server: SyncServer, print_tool, default_user): def test_upsert_base_tools(server: SyncServer, default_user): tools = server.tool_manager.upsert_base_tools(actor=default_user) - expected_tool_names = sorted(BASE_TOOLS + BASE_MEMORY_TOOLS) + expected_tool_names = sorted(BASE_TOOLS + BASE_MEMORY_TOOLS + MULTI_AGENT_TOOLS) assert sorted([t.name for t in tools]) == 
expected_tool_names # Call it again to make sure it doesn't create duplicates @@ -1528,8 +1727,12 @@ def test_upsert_base_tools(server: SyncServer, default_user): for t in tools: if t.name in BASE_TOOLS: assert t.tool_type == ToolType.LETTA_CORE - else: + elif t.name in BASE_MEMORY_TOOLS: assert t.tool_type == ToolType.LETTA_MEMORY_CORE + elif t.name in MULTI_AGENT_TOOLS: + assert t.tool_type == ToolType.LETTA_MULTI_AGENT_CORE + else: + pytest.fail(f"The tool name is unrecognized as a base tool: {t.name}") assert t.source_code is None assert t.json_schema @@ -2358,3 +2561,429 @@ def test_list_jobs_by_status(server: SyncServer, default_user): assert len(completed_jobs) == 1 assert completed_jobs[0].metadata_["type"] == job_data_completed.metadata_["type"] + + +def test_list_jobs_filter_by_type(server: SyncServer, default_user, default_job): + """Test that list_jobs correctly filters by job_type.""" + # Create a run job + run_pydantic = PydanticJob( + user_id=default_user.id, + status=JobStatus.pending, + job_type=JobType.RUN, + ) + run = server.job_manager.create_job(pydantic_job=run_pydantic, actor=default_user) + + # List only regular jobs + jobs = server.job_manager.list_jobs(actor=default_user) + assert len(jobs) == 1 + assert jobs[0].id == default_job.id + + # List only run jobs + jobs = server.job_manager.list_jobs(actor=default_user, job_type=JobType.RUN) + assert len(jobs) == 1 + assert jobs[0].id == run.id + + +# ====================================================================================================================== +# JobManager Tests - Messages +# ====================================================================================================================== + + +def test_job_messages_add(server: SyncServer, default_run, hello_world_message_fixture, default_user): + """Test adding a message to a job.""" + # Add message to job + server.job_manager.add_message_to_job( + job_id=default_run.id, + message_id=hello_world_message_fixture.id, 
+ actor=default_user, + ) + + # Verify message was added + messages = server.job_manager.get_job_messages( + job_id=default_run.id, + actor=default_user, + ) + assert len(messages) == 1 + assert messages[0].id == hello_world_message_fixture.id + assert messages[0].text == hello_world_message_fixture.text + + +def test_job_messages_pagination(server: SyncServer, default_run, default_user, sarah_agent): + """Test pagination of job messages.""" + # Create multiple messages + message_ids = [] + for i in range(5): + message = PydanticMessage( + organization_id=default_user.organization_id, + agent_id=sarah_agent.id, + role=MessageRole.user, + text=f"Test message {i}", + ) + msg = server.message_manager.create_message(message, actor=default_user) + message_ids.append(msg.id) + + # Add message to job + server.job_manager.add_message_to_job( + job_id=default_run.id, + message_id=msg.id, + actor=default_user, + ) + + # Test pagination with limit + messages = server.job_manager.get_job_messages( + job_id=default_run.id, + actor=default_user, + limit=2, + ) + assert len(messages) == 2 + assert messages[0].id == message_ids[0] + assert messages[1].id == message_ids[1] + + # Test pagination with cursor + messages = server.job_manager.get_job_messages( + job_id=default_run.id, + actor=default_user, + cursor=message_ids[1], + limit=2, + ) + assert len(messages) == 2 + assert messages[0].id == message_ids[2] + assert messages[1].id == message_ids[3] + + +def test_job_messages_ordering(server: SyncServer, default_run, default_user, sarah_agent): + """Test that messages are ordered by created_at.""" + # Create messages with different timestamps + base_time = datetime.utcnow() + message_times = [ + base_time - timedelta(minutes=2), + base_time - timedelta(minutes=1), + base_time, + ] + + for i, created_at in enumerate(message_times): + message = PydanticMessage( + role=MessageRole.user, + text="Test message", + organization_id=default_user.organization_id, + agent_id=sarah_agent.id, 
+ created_at=created_at, + ) + msg = server.message_manager.create_message(message, actor=default_user) + + # Add message to job + server.job_manager.add_message_to_job( + job_id=default_run.id, + message_id=msg.id, + actor=default_user, + ) + + # Verify messages are returned in chronological order + returned_messages = server.job_manager.get_job_messages( + job_id=default_run.id, + actor=default_user, + ) + + assert len(returned_messages) == 3 + assert returned_messages[0].created_at < returned_messages[1].created_at + assert returned_messages[1].created_at < returned_messages[2].created_at + + # Verify messages are returned in descending order + returned_messages = server.job_manager.get_job_messages( + job_id=default_run.id, + actor=default_user, + ascending=False, + ) + + assert len(returned_messages) == 3 + assert returned_messages[0].created_at > returned_messages[1].created_at + assert returned_messages[1].created_at > returned_messages[2].created_at + + +def test_job_messages_empty(server: SyncServer, default_run, default_user): + """Test getting messages for a job with no messages.""" + messages = server.job_manager.get_job_messages( + job_id=default_run.id, + actor=default_user, + ) + assert len(messages) == 0 + + +def test_job_messages_add_duplicate(server: SyncServer, default_run, hello_world_message_fixture, default_user): + """Test adding the same message to a job twice.""" + # Add message to job first time + server.job_manager.add_message_to_job( + job_id=default_run.id, + message_id=hello_world_message_fixture.id, + actor=default_user, + ) + + # Attempt to add same message again + with pytest.raises(IntegrityError): + server.job_manager.add_message_to_job( + job_id=default_run.id, + message_id=hello_world_message_fixture.id, + actor=default_user, + ) + + +def test_job_messages_filter(server: SyncServer, default_run, default_user, sarah_agent): + """Test getting messages associated with a job.""" + # Create test messages with different roles and tool 
calls + messages = [ + PydanticMessage( + role=MessageRole.user, + text="Hello", + organization_id=default_user.organization_id, + agent_id=sarah_agent.id, + ), + PydanticMessage( + role=MessageRole.assistant, + text="Hi there!", + organization_id=default_user.organization_id, + agent_id=sarah_agent.id, + ), + PydanticMessage( + role=MessageRole.assistant, + text="Let me help you with that", + organization_id=default_user.organization_id, + agent_id=sarah_agent.id, + tool_calls=[ + ToolCall( + id="call_1", + function=ToolCallFunction( + name="test_tool", + arguments='{"arg1": "value1"}', + ), + ) + ], + ), + ] + + # Add messages to job + for msg in messages: + created_msg = server.message_manager.create_message(msg, actor=default_user) + server.job_manager.add_message_to_job(default_run.id, created_msg.id, actor=default_user) + + # Test getting all messages + all_messages = server.job_manager.get_job_messages(job_id=default_run.id, actor=default_user) + assert len(all_messages) == 3 + + # Test filtering by role + user_messages = server.job_manager.get_job_messages(job_id=default_run.id, actor=default_user, role=MessageRole.user) + assert len(user_messages) == 1 + assert user_messages[0].role == MessageRole.user + + # Test limit + limited_messages = server.job_manager.get_job_messages(job_id=default_run.id, actor=default_user, limit=2) + assert len(limited_messages) == 2 + + +def test_get_run_messages_cursor(server: SyncServer, default_user: PydanticUser, sarah_agent): + """Test getting messages for a run with request config.""" + # Create a run with custom request config + run = server.job_manager.create_job( + pydantic_job=PydanticRun( + user_id=default_user.id, + status=JobStatus.created, + request_config=LettaRequestConfig( + use_assistant_message=False, assistant_message_tool_name="custom_tool", assistant_message_tool_kwarg="custom_arg" + ), + ), + actor=default_user, + ) + + # Add some messages + messages = [ + PydanticMessage( + 
organization_id=default_user.organization_id, + agent_id=sarah_agent.id, + role=MessageRole.user if i % 2 == 0 else MessageRole.assistant, + text=f"Test message {i}", + tool_calls=( + [{"id": f"call_{i}", "function": {"name": "custom_tool", "arguments": '{"custom_arg": "test"}'}}] if i % 2 == 1 else None + ), + ) + for i in range(4) + ] + + for msg in messages: + created_msg = server.message_manager.create_message(msg, actor=default_user) + server.job_manager.add_message_to_job(job_id=run.id, message_id=created_msg.id, actor=default_user) + + # Get messages and verify they're converted correctly + result = server.job_manager.get_run_messages_cursor(run_id=run.id, actor=default_user) + + # Verify correct number of messages. Assistant messages should be parsed + assert len(result) == 6 + + # Verify assistant messages are parsed according to request config + tool_call_messages = [msg for msg in result if msg.message_type == "tool_call_message"] + reasoning_messages = [msg for msg in result if msg.message_type == "reasoning_message"] + assert len(tool_call_messages) == 2 + assert len(reasoning_messages) == 2 + for msg in tool_call_messages: + assert msg.tool_call is not None + assert msg.tool_call.name == "custom_tool" + + +# ====================================================================================================================== +# JobManager Tests - Usage Statistics +# ====================================================================================================================== + + +def test_job_usage_stats_add_and_get(server: SyncServer, default_job, default_user): + """Test adding and retrieving job usage statistics.""" + job_manager = server.job_manager + + # Add usage statistics + job_manager.add_job_usage( + job_id=default_job.id, + usage=LettaUsageStatistics( + completion_tokens=100, + prompt_tokens=50, + total_tokens=150, + step_count=5, + ), + step_id="step_1", + actor=default_user, + ) + + # Get usage statistics + usage_stats = 
job_manager.get_job_usage(job_id=default_job.id, actor=default_user) + + # Verify the statistics + assert usage_stats.completion_tokens == 100 + assert usage_stats.prompt_tokens == 50 + assert usage_stats.total_tokens == 150 + + +def test_job_usage_stats_get_no_stats(server: SyncServer, default_job, default_user): + """Test getting usage statistics for a job with no stats.""" + job_manager = server.job_manager + + # Get usage statistics for a job with no stats + usage_stats = job_manager.get_job_usage(job_id=default_job.id, actor=default_user) + + # Verify default values + assert usage_stats.completion_tokens == 0 + assert usage_stats.prompt_tokens == 0 + assert usage_stats.total_tokens == 0 + + +def test_job_usage_stats_add_multiple(server: SyncServer, default_job, default_user): + """Test adding multiple usage statistics entries for a job.""" + job_manager = server.job_manager + + # Add first usage statistics entry + job_manager.add_job_usage( + job_id=default_job.id, + usage=LettaUsageStatistics( + completion_tokens=100, + prompt_tokens=50, + total_tokens=150, + step_count=5, + ), + step_id="step_1", + actor=default_user, + ) + + # Add second usage statistics entry + job_manager.add_job_usage( + job_id=default_job.id, + usage=LettaUsageStatistics( + completion_tokens=200, + prompt_tokens=100, + total_tokens=300, + step_count=10, + ), + step_id="step_2", + actor=default_user, + ) + + # Get usage statistics (should return the latest entry) + usage_stats = job_manager.get_job_usage(job_id=default_job.id, actor=default_user) + + # Verify we get the most recent statistics + assert usage_stats.completion_tokens == 200 + assert usage_stats.prompt_tokens == 100 + assert usage_stats.total_tokens == 300 + + +def test_job_usage_stats_get_nonexistent_job(server: SyncServer, default_user): + """Test getting usage statistics for a nonexistent job.""" + job_manager = server.job_manager + + with pytest.raises(NoResultFound): + job_manager.get_job_usage(job_id="nonexistent_job", 
actor=default_user) + + +def test_job_usage_stats_add_nonexistent_job(server: SyncServer, default_user): + """Test adding usage statistics for a nonexistent job.""" + job_manager = server.job_manager + + with pytest.raises(NoResultFound): + job_manager.add_job_usage( + job_id="nonexistent_job", + usage=LettaUsageStatistics( + completion_tokens=100, + prompt_tokens=50, + total_tokens=150, + step_count=5, + ), + step_id="step_1", + actor=default_user, + ) + + +def test_list_tags(server: SyncServer, default_user, default_organization): + """Test listing tags functionality.""" + # Create multiple agents with different tags + agents = [] + tags = ["alpha", "beta", "gamma", "delta", "epsilon"] + + # Create agents with different combinations of tags + for i in range(3): + agent = server.agent_manager.create_agent( + actor=default_user, + agent_create=CreateAgent( + name="tag_agent_" + str(i), + memory_blocks=[], + llm_config=LLMConfig.default_config("gpt-4"), + embedding_config=EmbeddingConfig.default_config(provider="openai"), + tags=tags[i : i + 3], # Each agent gets 3 consecutive tags + ), + ) + agents.append(agent) + + # Test basic listing - should return all unique tags in alphabetical order + all_tags = server.agent_manager.list_tags(actor=default_user) + assert all_tags == sorted(tags[:5]) # All tags should be present and sorted + + # Test pagination with limit + limited_tags = server.agent_manager.list_tags(actor=default_user, limit=2) + assert limited_tags == tags[:2] # Should return first 2 tags + + # Test pagination with cursor + cursor_tags = server.agent_manager.list_tags(actor=default_user, cursor="beta") + assert cursor_tags == ["delta", "epsilon", "gamma"] # Tags after "beta" + + # Test text search + search_tags = server.agent_manager.list_tags(actor=default_user, query_text="ta") + assert search_tags == ["beta", "delta"] # Only tags containing "ta" + + # Test with non-matching search + no_match_tags = server.agent_manager.list_tags(actor=default_user, 
query_text="xyz") + assert no_match_tags == [] # Should return empty list + + # Test with different organization + other_org = server.organization_manager.create_organization(pydantic_org=PydanticOrganization(name="Other Org")) + other_user = server.user_manager.create_user(PydanticUser(name="Other User", organization_id=other_org.id)) + + # Other org's tags should be empty + other_org_tags = server.agent_manager.list_tags(actor=other_user) + assert other_org_tags == [] + + # Cleanup + for agent in agents: + server.agent_manager.delete_agent(agent.id, actor=default_user) diff --git a/tests/test_model_letta_perfomance.py b/tests/test_model_letta_performance.py similarity index 86% rename from tests/test_model_letta_perfomance.py rename to tests/test_model_letta_performance.py index d20d64ca14..4d72126fa6 100644 --- a/tests/test_model_letta_perfomance.py +++ b/tests/test_model_letta_performance.py @@ -1,6 +1,4 @@ -import functools import os -import time import pytest @@ -13,74 +11,13 @@ check_first_response_is_valid_for_llm_endpoint, run_embedding_endpoint, ) +from tests.helpers.utils import retry_until_success, retry_until_threshold # directories embedding_config_dir = "tests/configs/embedding_model_configs" llm_config_dir = "tests/configs/llm_model_configs" -def retry_until_threshold(threshold=0.5, max_attempts=10, sleep_time_seconds=4): - """ - Decorator to retry a test until a failure threshold is crossed. - - :param threshold: Expected passing rate (e.g., 0.5 means 50% success rate expected). - :param max_attempts: Maximum number of attempts to retry the test. 
- """ - - def decorator_retry(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - success_count = 0 - failure_count = 0 - - for attempt in range(max_attempts): - try: - func(*args, **kwargs) - success_count += 1 - except Exception as e: - failure_count += 1 - print(f"\033[93mAn attempt failed with error:\n{e}\033[0m") - - time.sleep(sleep_time_seconds) - - rate = success_count / max_attempts - if rate >= threshold: - print(f"Test met expected passing rate of {threshold:.2f}. Actual rate: {success_count}/{max_attempts}") - else: - raise AssertionError( - f"Test did not meet expected passing rate of {threshold:.2f}. Actual rate: {success_count}/{max_attempts}" - ) - - return wrapper - - return decorator_retry - - -def retry_until_success(max_attempts=10, sleep_time_seconds=4): - """ - Decorator to retry a function until it succeeds or the maximum number of attempts is reached. - - :param max_attempts: Maximum number of attempts to retry the function. - :param sleep_time_seconds: Time to wait between attempts, in seconds. 
- """ - - def decorator_retry(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - for attempt in range(1, max_attempts + 1): - try: - return func(*args, **kwargs) - except Exception as e: - print(f"\033[93mAttempt {attempt} failed with error:\n{e}\033[0m") - if attempt == max_attempts: - raise - time.sleep(sleep_time_seconds) - - return wrapper - - return decorator_retry - - # ====================================================================================================================== # OPENAI TESTS # ====================================================================================================================== diff --git a/tests/test_sdk_client.py b/tests/test_sdk_client.py new file mode 100644 index 0000000000..71568abcb8 --- /dev/null +++ b/tests/test_sdk_client.py @@ -0,0 +1,593 @@ +import asyncio + +# import json +import os +import threading +import time +import uuid + +import pytest +from dotenv import load_dotenv +from letta_client import CreateBlock +from letta_client import Letta as LettaSDKClient +from letta_client import MessageCreate +from letta_client.core import ApiError +from letta_client.runs.types import GetRunMessagesResponseItem_ToolCallMessage +from letta_client.types import LettaRequestConfig, LettaResponseMessagesItem_ToolReturnMessage + +# Constants +SERVER_PORT = 8283 + + +def run_server(): + load_dotenv() + + from letta.server.rest_api.app import start_server + + print("Starting server...") + start_server(debug=True) + + +@pytest.fixture(scope="module") +def client(): + # Get URL from environment or start server + server_url = os.getenv("LETTA_SERVER_URL", f"http://localhost:{SERVER_PORT}") + if not os.getenv("LETTA_SERVER_URL"): + print("Starting server thread") + thread = threading.Thread(target=run_server, daemon=True) + thread.start() + time.sleep(5) + print("Running client tests with server:", server_url) + client = LettaSDKClient(base_url=server_url, token=None) + yield client + + 
+@pytest.fixture(scope="module") +def agent(client): + agent_state = client.agents.create( + memory_blocks=[ + CreateBlock( + label="human", + value="username: sarah", + ), + ], + llm="openai/gpt-4", + embedding="openai/text-embedding-ada-002", + ) + yield agent_state + + # delete agent + client.agents.delete(agent_id=agent_state.id) + + +def test_shared_blocks(client): + # create a block + block = client.blocks.create( + label="human", + value="username: sarah", + ) + + # create agents with shared block + agent_state1 = client.agents.create( + name="agent1", + memory_blocks=[ + CreateBlock( + label="persona", + value="you are agent 1", + ), + ], + block_ids=[block.id], + llm="openai/gpt-4", + embedding="openai/text-embedding-ada-002", + ) + agent_state2 = client.agents.create( + name="agent2", + memory_blocks=[ + CreateBlock( + label="persona", + value="you are agent 2", + ), + ], + block_ids=[block.id], + llm="openai/gpt-4", + embedding="openai/text-embedding-ada-002", + ) + + # update memory + client.agents.messages.create( + agent_id=agent_state1.id, + messages=[ + MessageCreate( + role="user", + text="my name is actually charles", + ) + ], + ) + + # check agent 2 memory + assert "charles" in client.blocks.get(block_id=block.id).value.lower(), f"Shared block update failed {client.blocks.get(block_id=block.id).value}" + + client.agents.messages.create( + agent_id=agent_state2.id, + messages=[ + MessageCreate( + role="user", + text="whats my name?", + ) + ], + ) + assert ( + "charles" in client.agents.core_memory.get_block(agent_id=agent_state2.id, block_label="human").value.lower() + ), f"Shared block update failed {client.agents.core_memory.get_block(agent_id=agent_state2.id, block_label='human').value}" + + # cleanup + client.agents.delete(agent_state1.id) + client.agents.delete(agent_state2.id) + + +def test_add_and_manage_tags_for_agent(client): + """ + Comprehensive happy path test for adding, retrieving, and managing tags on an agent. 
+ """ + tags_to_add = ["test_tag_1", "test_tag_2", "test_tag_3"] + + # Step 0: create an agent with no tags + agent = client.agents.create( + memory_blocks=[ + CreateBlock( + label="human", + value="username: sarah", + ), + ], + llm="openai/gpt-4", + embedding="openai/text-embedding-ada-002", + ) + assert len(agent.tags) == 0 + + # Step 1: Add multiple tags to the agent + client.agents.update(agent_id=agent.id, tags=tags_to_add) + + # Step 2: Retrieve tags for the agent and verify they match the added tags + retrieved_tags = client.agents.get(agent_id=agent.id).tags + assert set(retrieved_tags) == set(tags_to_add), f"Expected tags {tags_to_add}, but got {retrieved_tags}" + + # Step 3: Retrieve agents by each tag to ensure the agent is associated correctly + for tag in tags_to_add: + agents_with_tag = client.agents.list(tags=[tag]) + assert agent.id in [a.id for a in agents_with_tag], f"Expected agent {agent.id} to be associated with tag '{tag}'" + + # Step 4: Delete a specific tag from the agent and verify its removal + tag_to_delete = tags_to_add.pop() + client.agents.update(agent_id=agent.id, tags=tags_to_add) + + # Verify the tag is removed from the agent's tags + remaining_tags = client.agents.get(agent_id=agent.id).tags + assert tag_to_delete not in remaining_tags, f"Tag '{tag_to_delete}' was not removed as expected" + assert set(remaining_tags) == set(tags_to_add), f"Expected remaining tags to be {tags_to_add[1:]}, but got {remaining_tags}" + + # Step 5: Delete all remaining tags from the agent + client.agents.update(agent_id=agent.id, tags=[]) + + # Verify all tags are removed + final_tags = client.agents.get(agent_id=agent.id).tags + assert len(final_tags) == 0, f"Expected no tags, but found {final_tags}" + + # Remove agent + client.agents.delete(agent.id) + + +def test_agent_tags(client): + """Test creating agents with tags and retrieving tags via the API.""" + # Create multiple agents with different tags + agent1 = client.agents.create( + memory_blocks=[ 
+ CreateBlock( + label="human", + value="username: sarah", + ), + ], + llm="openai/gpt-4", + embedding="openai/text-embedding-ada-002", + tags=["test", "agent1", "production"], + ) + + agent2 = client.agents.create( + memory_blocks=[ + CreateBlock( + label="human", + value="username: sarah", + ), + ], + llm="openai/gpt-4", + embedding="openai/text-embedding-ada-002", + tags=["test", "agent2", "development"], + ) + + agent3 = client.agents.create( + memory_blocks=[ + CreateBlock( + label="human", + value="username: sarah", + ), + ], + llm="openai/gpt-4", + embedding="openai/text-embedding-ada-002", + tags=["test", "agent3", "production"], + ) + + # Test getting all tags + all_tags = client.tag.list_tags() + expected_tags = ["agent1", "agent2", "agent3", "development", "production", "test"] + assert sorted(all_tags) == expected_tags + + # Test pagination + paginated_tags = client.tag.list_tags(limit=2) + assert len(paginated_tags) == 2 + assert paginated_tags[0] == "agent1" + assert paginated_tags[1] == "agent2" + + # Test pagination with cursor + next_page_tags = client.tag.list_tags(cursor="agent2", limit=2) + assert len(next_page_tags) == 2 + assert next_page_tags[0] == "agent3" + assert next_page_tags[1] == "development" + + # Test text search + prod_tags = client.tag.list_tags(query_text="prod") + assert sorted(prod_tags) == ["production"] + + dev_tags = client.tag.list_tags(query_text="dev") + assert sorted(dev_tags) == ["development"] + + agent_tags = client.tag.list_tags(query_text="agent") + assert sorted(agent_tags) == ["agent1", "agent2", "agent3"] + + # Remove agents + client.agents.delete(agent1.id) + client.agents.delete(agent2.id) + client.agents.delete(agent3.id) + + +def test_update_agent_memory_label(client, agent): + """Test that we can update the label of a block in an agent's memory""" + current_labels = [block.label for block in client.agents.core_memory.get_blocks(agent_id=agent.id)] + example_label = current_labels[0] + example_new_label = 
"example_new_label" + assert example_new_label not in current_labels + + client.agents.core_memory.update_block( + agent_id=agent.id, + block_label=example_label, + label=example_new_label, + ) + + updated_block = client.agents.core_memory.get_block(agent_id=agent.id, block_label=example_new_label) + assert updated_block.label == example_new_label + + +def test_add_remove_agent_memory_block(client, agent): + """Test that we can add and remove a block from an agent's memory""" + current_labels = [block.label for block in client.agents.core_memory.get_blocks(agent_id=agent.id)] + example_new_label = current_labels[0] + "_v2" + example_new_value = "example value" + assert example_new_label not in current_labels + + # Link a new memory block + block = client.blocks.create( + label=example_new_label, + value=example_new_value, + limit=1000, + ) + client.blocks.link_agent_memory_block( + agent_id=agent.id, + block_id=block.id, + ) + + updated_block = client.agents.core_memory.get_block( + agent_id=agent.id, + block_label=example_new_label, + ) + assert updated_block.value == example_new_value + + # Now unlink the block + client.blocks.unlink_agent_memory_block( + agent_id=agent.id, + block_id=block.id, + ) + + current_labels = [block.label for block in client.agents.core_memory.get_blocks(agent_id=agent.id)] + assert example_new_label not in current_labels + + +def test_update_agent_memory_limit(client, agent): + """Test that we can update the limit of a block in an agent's memory""" + + current_labels = [block.label for block in client.agents.core_memory.get_blocks(agent_id=agent.id)] + example_label = current_labels[0] + example_new_limit = 1 + current_block = client.agents.core_memory.get_block(agent_id=agent.id, block_label=example_label) + current_block_length = len(current_block.value) + + assert example_new_limit != client.agents.core_memory.get_block(agent_id=agent.id, block_label=example_label).limit + assert example_new_limit < current_block_length + + # We 
expect this to throw a value error + with pytest.raises(ApiError): + client.agents.core_memory.update_block( + agent_id=agent.id, + block_label=example_label, + limit=example_new_limit, + ) + + # Now try the same thing with a higher limit + example_new_limit = current_block_length + 10000 + assert example_new_limit > current_block_length + client.agents.core_memory.update_block( + agent_id=agent.id, + block_label=example_label, + limit=example_new_limit, + ) + + assert example_new_limit == client.agents.core_memory.get_block(agent_id=agent.id, block_label=example_label).limit + + +def test_messages(client, agent): + send_message_response = client.agents.messages.create( + agent_id=agent.id, + messages=[ + MessageCreate( + role="user", + text="Test message", + ), + ], + ) + assert send_message_response, "Sending message failed" + + messages_response = client.agents.messages.list( + agent_id=agent.id, + limit=1, + ) + assert len(messages_response) > 0, "Retrieving messages failed" + + +def test_send_system_message(client, agent): + """Important unit test since the Letta API exposes sending system messages, but some backends don't natively support it (eg Anthropic)""" + send_system_message_response = client.agents.messages.create( + agent_id=agent.id, + messages=[ + MessageCreate( + role="system", + text="Event occurred: The user just logged off.", + ), + ], + ) + assert send_system_message_response, "Sending message failed" + + +def test_function_return_limit(client, agent): + """Test to see if the function return limit works""" + + def big_return(): + """ + Always call this tool. 
+ + Returns: + important_data (str): Important data + """ + return "x" * 100000 + + tool = client.tools.upsert_from_function(func=big_return, name="big_return", return_char_limit=1000) + + client.agents.tools.add(agent_id=agent.id, tool_id=tool.id) + + # get function response + response = client.agents.messages.create( + agent_id=agent.id, + messages=[ + MessageCreate( + role="user", + text="call the big_return function", + ), + ], + config=LettaRequestConfig(use_assistant_message=False), + ) + + response_message = None + for message in response.messages: + if isinstance(message, LettaResponseMessagesItem_ToolReturnMessage): + response_message = message + break + + assert response_message, "ToolReturnMessage message not found in response" + res = response_message.tool_return + assert "function output was truncated " in res + + +def test_function_always_error(client, agent): + """Test to see if function that errors works correctly""" + + def always_error(): + """ + Always throw an error. 
+ """ + return 5 / 0 + + tool = client.tools.upsert_from_function(func=always_error, name="always_error", return_char_limit=1000) + + client.agents.tools.add(agent_id=agent.id, tool_id=tool.id) + + # get function response + response = client.agents.messages.create( + agent_id=agent.id, + messages=[ + MessageCreate( + role="user", + text="call the always_error function", + ), + ], + config=LettaRequestConfig(use_assistant_message=False), + ) + + response_message = None + for message in response.messages: + if isinstance(message, LettaResponseMessagesItem_ToolReturnMessage): + response_message = message + break + + assert response_message, "ToolReturnMessage message not found in response" + assert response_message.status == "error" + assert response_message.tool_return == "Error executing function always_error: ZeroDivisionError: division by zero" + + +@pytest.mark.asyncio +async def test_send_message_parallel(client, agent): + """ + Test that sending two messages in parallel does not error. 
+ """ + + # Define a coroutine for sending a message using asyncio.to_thread for synchronous calls + async def send_message_task(message: str): + response = await asyncio.to_thread( + client.agents.messages.create, + agent_id=agent.id, + messages=[ + MessageCreate( + role="user", + text=message, + ), + ], + ) + assert response, f"Sending message '{message}' failed" + return response + + # Prepare two tasks with different messages + messages = ["Test message 1", "Test message 2"] + tasks = [send_message_task(message) for message in messages] + + # Run the tasks concurrently + responses = await asyncio.gather(*tasks, return_exceptions=True) + + # Check for exceptions and validate responses + for i, response in enumerate(responses): + if isinstance(response, Exception): + pytest.fail(f"Task {i} failed with exception: {response}") + else: + assert response, f"Task {i} returned an invalid response: {response}" + + # Ensure both tasks completed + assert len(responses) == len(messages), "Not all messages were processed" + + +def test_send_message_async(client, agent): + """ + Test that we can send a message asynchronously and retrieve the messages, along with usage statistics + """ + test_message = "This is a test message, respond to the user with a sentence." 
+ run = client.agents.messages.create_async( + agent_id=agent.id, + messages=[ + MessageCreate( + role="user", + text=test_message, + ), + ], + config=LettaRequestConfig(use_assistant_message=False), + ) + assert run.id is not None + assert run.status == "created" + + # Wait for the job to complete, cancel it if takes over 10 seconds + start_time = time.time() + while run.status == "created": + time.sleep(1) + run = client.runs.get_run(run_id=run.id) + print(f"Run status: {run.status}") + if time.time() - start_time > 10: + pytest.fail("Run took too long to complete") + + print(f"Run completed in {time.time() - start_time} seconds, run={run}") + assert run.status == "completed" + + # Get messages for the job + messages = client.runs.get_run_messages(run_id=run.id) + assert len(messages) >= 2 # At least assistant response + + # Check filters + assistant_messages = client.runs.get_run_messages(run_id=run.id, role="assistant") + assert len(assistant_messages) > 0 + tool_messages = client.runs.get_run_messages(run_id=run.id, role="tool") + assert len(tool_messages) > 0 + + specific_tool_messages = [ + message + for message in client.runs.get_run_messages(run_id=run.id) + if isinstance(message, GetRunMessagesResponseItem_ToolCallMessage) + ] + assert len(specific_tool_messages) > 0 + assert specific_tool_messages[0].tool_call.name == "send_message" + + # Get and verify usage statistics + usage = client.runs.get_run_usage(run_id=run.id) + assert usage.completion_tokens >= 0 + assert usage.prompt_tokens >= 0 + assert usage.total_tokens >= 0 + assert usage.total_tokens == usage.completion_tokens + usage.prompt_tokens + + +def test_agent_creation(client): + """Test that block IDs are properly attached when creating an agent.""" + offline_memory_agent_system = """ + You are a helpful agent. You will be provided with a list of memory blocks and a user preferences block. + You should use the memory blocks to remember information about the user and their preferences. 
+ You should also use the user preferences block to remember information about the user's preferences. + """ + + # Create a test block that will represent user preferences + user_preferences_block = client.blocks.create( + label="user_preferences", + value="", + limit=10000, + ) + + # Create test tools + def test_tool(): + """A simple test tool.""" + return "Hello from test tool!" + + def another_test_tool(): + """Another test tool.""" + return "Hello from another test tool!" + + tool1 = client.tools.upsert_from_function(func=test_tool, name="test_tool", tags=["test"]) + tool2 = client.tools.upsert_from_function(func=another_test_tool, name="another_test_tool", tags=["test"]) + + # Create test blocks + offline_persona_block = client.blocks.create(label="persona", value="persona description", limit=5000) + mindy_block = client.blocks.create(label="mindy", value="Mindy is a helpful assistant", limit=5000) + + # Create agent with the blocks and tools + agent = client.agents.create( + name=f"test_agent_{str(uuid.uuid4())}", + memory_blocks=[offline_persona_block, mindy_block], + llm="openai/gpt-4", + embedding="openai/text-embedding-ada-002", + tool_ids=[tool1.id, tool2.id], + include_base_tools=False, + tags=["test"], + block_ids=[user_preferences_block.id], + ) + + # Verify the agent was created successfully + assert agent is not None + assert agent.id is not None + + # Verify all memory blocks are properly attached + for block in [offline_persona_block, mindy_block, user_preferences_block]: + agent_block = client.agents.core_memory.get_block(agent_id=agent.id, block_label=block.label) + assert block.value == agent_block.value and block.limit == agent_block.limit + + # Verify the tools are properly attached + agent_tools = client.agents.tools.list(agent_id=agent.id) + assert len(agent_tools) == 2 + tool_ids = {tool1.id, tool2.id} + assert all(tool.id in tool_ids for tool in agent_tools) diff --git a/tests/test_v1_routes.py b/tests/test_v1_routes.py index 
5093fe93b3..8394e61ef8 100644 --- a/tests/test_v1_routes.py +++ b/tests/test_v1_routes.py @@ -1,9 +1,12 @@ +from datetime import datetime from unittest.mock import MagicMock, Mock, patch import pytest from composio.client.collections import ActionModel, ActionParametersModel, ActionResponseModel, AppModel from fastapi.testclient import TestClient +from letta.orm.errors import NoResultFound +from letta.schemas.message import UserMessage from letta.schemas.tool import ToolCreate, ToolUpdate from letta.server.rest_api.app import app from letta.server.rest_api.utils import get_letta_server @@ -47,7 +50,6 @@ def create_integers_tool(add_integers_tool): name=add_integers_tool.name, description=add_integers_tool.description, tags=add_integers_tool.tags, - module=add_integers_tool.module, source_code=add_integers_tool.source_code, source_type=add_integers_tool.source_type, json_schema=add_integers_tool.json_schema, @@ -61,7 +63,6 @@ def update_integers_tool(add_integers_tool): name=add_integers_tool.name, description=add_integers_tool.description, tags=add_integers_tool.tags, - module=add_integers_tool.module, source_code=add_integers_tool.source_code, source_type=add_integers_tool.source_type, json_schema=add_integers_tool.json_schema, @@ -328,3 +329,154 @@ def test_add_composio_tool(client, mock_sync_server, add_integers_tool): # Verify the mocked from_composio method was called mock_from_composio.assert_called_once_with(action_name=add_integers_tool.name, api_key="mock_composio_api_key") + + +# ====================================================================================================================== +# Runs Routes Tests +# ====================================================================================================================== + + +def test_get_run_messages(client, mock_sync_server): + """Test getting messages for a run.""" + # Create properly formatted mock messages + current_time = datetime.utcnow() + mock_messages = [ + UserMessage( + 
id=f"message-{i:08x}", + date=current_time, + message=f"Test message {i}", + ) + for i in range(2) + ] + + # Configure mock server responses + mock_sync_server.user_manager.get_user_or_default.return_value = Mock(id="user-123") + mock_sync_server.job_manager.get_run_messages_cursor.return_value = mock_messages + + # Test successful retrieval + response = client.get( + "/v1/runs/run-12345678/messages", + headers={"user_id": "user-123"}, + params={ + "limit": 10, + "cursor": mock_messages[0].id, + "role": "user", + "ascending": True, + }, + ) + assert response.status_code == 200 + assert len(response.json()) == 2 + assert response.json()[0]["id"] == mock_messages[0].id + assert response.json()[1]["id"] == mock_messages[1].id + + # Verify mock calls + mock_sync_server.user_manager.get_user_or_default.assert_called_once_with(user_id="user-123") + mock_sync_server.job_manager.get_run_messages_cursor.assert_called_once_with( + run_id="run-12345678", + actor=mock_sync_server.user_manager.get_user_or_default.return_value, + limit=10, + cursor=mock_messages[0].id, + ascending=True, + role="user", + ) + + +def test_get_run_messages_not_found(client, mock_sync_server): + """Test getting messages for a non-existent run.""" + # Configure mock responses + error_message = "Run 'run-nonexistent' not found" + mock_sync_server.user_manager.get_user_or_default.return_value = Mock(id="user-123") + mock_sync_server.job_manager.get_run_messages_cursor.side_effect = NoResultFound(error_message) + + response = client.get("/v1/runs/run-nonexistent/messages", headers={"user_id": "user-123"}) + + assert response.status_code == 404 + assert error_message in response.json()["detail"] + + +def test_get_run_usage(client, mock_sync_server): + """Test getting usage statistics for a run.""" + # Configure mock responses + mock_sync_server.user_manager.get_user_or_default.return_value = Mock(id="user-123") + mock_usage = Mock( + completion_tokens=100, + prompt_tokens=200, + total_tokens=300, + ) + 
mock_sync_server.job_manager.get_job_usage.return_value = mock_usage + + # Make request + response = client.get("/v1/runs/run-12345678/usage", headers={"user_id": "user-123"}) + + # Check response + assert response.status_code == 200 + assert response.json() == { + "completion_tokens": 100, + "prompt_tokens": 200, + "total_tokens": 300, + } + + # Verify mock calls + mock_sync_server.user_manager.get_user_or_default.assert_called_once_with(user_id="user-123") + mock_sync_server.job_manager.get_job_usage.assert_called_once_with( + job_id="run-12345678", + actor=mock_sync_server.user_manager.get_user_or_default.return_value, + ) + + +def test_get_run_usage_not_found(client, mock_sync_server): + """Test getting usage statistics for a non-existent run.""" + # Configure mock responses + error_message = "Run 'run-nonexistent' not found" + mock_sync_server.user_manager.get_user_or_default.return_value = Mock(id="user-123") + mock_sync_server.job_manager.get_job_usage.side_effect = NoResultFound(error_message) + + # Make request + response = client.get("/v1/runs/run-nonexistent/usage", headers={"user_id": "user-123"}) + + assert response.status_code == 404 + assert error_message in response.json()["detail"] + + +# ====================================================================================================================== +# Tags Routes Tests +# ====================================================================================================================== + + +def test_get_tags(client, mock_sync_server): + """Test basic tag listing""" + mock_sync_server.agent_manager.list_tags.return_value = ["tag1", "tag2"] + + response = client.get("/v1/tags", headers={"user_id": "test_user"}) + + assert response.status_code == 200 + assert response.json() == ["tag1", "tag2"] + mock_sync_server.agent_manager.list_tags.assert_called_once_with( + actor=mock_sync_server.user_manager.get_user_or_default.return_value, cursor=None, limit=50, query_text=None + ) + + +def 
test_get_tags_with_pagination(client, mock_sync_server): + """Test tag listing with pagination parameters""" + mock_sync_server.agent_manager.list_tags.return_value = ["tag3", "tag4"] + + response = client.get("/v1/tags", params={"cursor": "tag2", "limit": 2}, headers={"user_id": "test_user"}) + + assert response.status_code == 200 + assert response.json() == ["tag3", "tag4"] + mock_sync_server.agent_manager.list_tags.assert_called_once_with( + actor=mock_sync_server.user_manager.get_user_or_default.return_value, cursor="tag2", limit=2, query_text=None + ) + + +def test_get_tags_with_search(client, mock_sync_server): + """Test tag listing with text search""" + mock_sync_server.agent_manager.list_tags.return_value = ["user_tag1", "user_tag2"] + + response = client.get("/v1/tags", params={"query_text": "user"}, headers={"user_id": "test_user"}) + + assert response.status_code == 200 + assert response.json() == ["user_tag1", "user_tag2"] + mock_sync_server.agent_manager.list_tags.assert_called_once_with( + actor=mock_sync_server.user_manager.get_user_or_default.return_value, cursor=None, limit=50, query_text="user" + )