forked from quantalogic/quantalogic
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path02-agent-with-event-monitoring.py
executable file
·74 lines (64 loc) · 2.51 KB
/
02-agent-with-event-monitoring.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
#!/usr/bin/env -S uv run
# /// script
# requires-python = ">=3.12"
# dependencies = [
# "quantalogic",
# ]
# ///
import os
from quantalogic import Agent
from quantalogic.console_print_events import console_print_events
from quantalogic.console_print_token import console_print_token
from quantalogic.tools import (
LLMTool,
)
# MODEL_NAME = "deepseek/deepseek-chat"
MODEL_NAME = "ovh/DeepSeek-R1-Distill-Llama-70B"

# Fail fast if the credential is absent so the script dies with a clear
# message here instead of an opaque auth error deep inside the first API call.
# NOTE(review): MODEL_NAME points at an OVH-hosted model, yet the variable
# checked is DEEPSEEK_API_KEY — confirm this is the env var the underlying
# client actually reads for this provider.
api_key = os.environ.get("DEEPSEEK_API_KEY")
if not api_key:
    raise ValueError("DEEPSEEK_API_KEY environment variable is not set")
# Build the agent around a single model: the same MODEL_NAME drives both the
# agent's own reasoning and the embedded LLM tool, keeping behavior coherent
# across modes and avoiding model-switching overhead. The tool streams its
# tokens through console_print_token.
llm_tool = LLMTool(
    model_name=MODEL_NAME,
    name="deepseek_llm_tool",
    on_token=console_print_token,
)
agent = Agent(model_name=MODEL_NAME, tools=[llm_tool])
# Observability wiring: print every lifecycle event the agent emits so a run
# can be followed — and debugged — in real time from the console.
LIFECYCLE_EVENTS = [
    "task_complete",
    "task_think_start",
    "task_think_end",
    "tool_execution_start",
    "tool_execution_end",
    "error_max_iterations_reached",
    "memory_full",
    "memory_compacted",
    "memory_summary",
]
agent.event_emitter.on(event=LIFECYCLE_EVENTS, listener=console_print_events)

# Streamed output gets its own listener so tokens print as they arrive.
agent.event_emitter.on(event=["stream_chunk"], listener=console_print_token)
# Execute a multi-step task showcasing the agent's capabilities:
# creative generation, translation, and style transfer in one prompt.
# NOTE: these adjacent string literals are implicitly concatenated into a
# single prompt string, so every step must carry its own trailing separator.
result = agent.solve_task(
    "1. Write a poem in English about a dog. "
    "2. Translate the poem into French. "
    "3. Choose 2 French authors. "  # fixed: separator was missing, yielding "...authors4. Rewrite..."
    "4. Rewrite the translated poem with the style of the chosen authors. ",
    streaming=True,  # enable token-by-token output via the stream_chunk listener
)
print(result)