diff --git a/docs/astro.config.mjs b/docs/astro.config.mjs index 59ce562..4e41a0b 100644 --- a/docs/astro.config.mjs +++ b/docs/astro.config.mjs @@ -50,12 +50,7 @@ export default defineConfig({ ] }, { label: 'Custom Classifier', link: '/classifiers/custom-classifier' }, - { - label: 'Examples', - items: [ - { label: 'Ollama Classifier', link: '/classifiers/examples/ollama-classifier'}, - ] - }, + ] }, { @@ -77,13 +72,7 @@ export default defineConfig({ ] }, { label: 'Custom Agents', link: '/agents/custom-agents' }, - { - label: 'Examples', - items: [ - { label: 'Ollama Agent', link: '/agents/examples/ollama-agent'}, - { label: 'Api Agent', link: '/agents/examples/api-agent'}, - ] - }, + ] }, { @@ -114,29 +103,59 @@ export default defineConfig({ ] }, { - label: 'Advanced Features', - items: [ - { label: 'Agent Overlap Analysis', link: '/advanced-features/agent-overlap' }, - { label: 'Create a Weather Agent using Tools', link: '/advanced-features/weather-tool-use' }, - { label: 'Create a Math Agent using Tools', link: '/advanced-features/math-tool-use' }, - { label: 'Logging', link: '/advanced-features/logging' }, - ] - }, - { - label: 'Deployment', + label: 'Cookbook', items: [ - { label: 'Local Development', link: '/deployment/local' }, - { label: 'AWS Lambda Typescript', link: '/deployment/aws-lambda-ts' }, - { label: 'AWS Lambda Python', link: '/deployment/aws-lambda-py' }, - { label: 'Demo Web App', link: '/deployment/demo-web-app' }, + { + label: 'Getting Started', + items: [ + { label: 'Overview', link: '/cookbook/overview' }, + { label: 'Local Development', link: '/cookbook/getting-started/local-development' } + ] + }, + { + label: 'Examples', + items: [ + { label: 'Chat Chainlit App', link: '/cookbook/examples/chat-chainlit-app' }, + { label: 'Chat Demo App', link: '/cookbook/examples/chat-demo-app' }, + { label: 'E-commerce Support Simulator', link: '/cookbook/examples/ecommerce-support-simulator' }, + { label: 'Fast API Streaming', link: 
'/cookbook/examples/fast-api-streaming' }, + { label: 'Typescript Local Demo', link: '/cookbook/examples/typescript-local-demo' }, + { label: 'Python Local Demo', link: '/cookbook/examples/python-local-demo' }, + { label: 'Api Agent', link: '/cookbook/examples/api-agent' }, + { label: 'Ollama Agent', link: '/cookbook/examples/ollama-agent' }, + { label: 'Ollama Classifier', link: '/cookbook/examples/ollama-classifier' } + ] + }, + { + label: 'Lambda Implementations', + items: [ + { label: 'Python Lambda', link: '/cookbook/lambda/aws-lambda-python' }, + { label: 'NodeJs Lambda', link: '/cookbook/lambda/aws-lambda-nodejs' } + ] + }, + { + label: 'Tool Integration', + items: [ + { label: 'Weather API Integration', link: '/cookbook/tools/weather-api' }, + { label: 'Math Operations', link: '/cookbook/tools/math-operations' } + ] + }, + { + label: 'Routing Patterns', + items: [ + { label: 'Cost-Efficient Routing', link: '/cookbook/patterns/cost-efficient' }, + { label: 'Multi-lingual Routing', link: '/cookbook/patterns/multi-lingual' } + ] + }, + { + label: 'Optimization & Monitoring', + items: [ + { label: 'Agent Overlap Analysis', link: '/cookbook/monitoring/agent-overlap' }, + { label: 'Logging and Monitoring', link: '/cookbook/monitoring/logging' } + ] + } ] - }, - { - label: 'Use cases', - items: [ - { label: 'Use case examples', link: '/use-cases/use-cases' }, - ] - }, + } ] }) ] diff --git a/docs/public/ai-powered_e-commerce_support_simulator.png b/docs/public/ai-powered_e-commerce_support_simulator.png new file mode 100644 index 0000000..76118a8 Binary files /dev/null and b/docs/public/ai-powered_e-commerce_support_simulator.png differ diff --git a/docs/public/ai_e-commerce_support_system.png b/docs/public/ai_e-commerce_support_system.png new file mode 100644 index 0000000..31b867f Binary files /dev/null and b/docs/public/ai_e-commerce_support_system.png differ diff --git a/docs/public/chat_mode.png b/docs/public/chat_mode.png new file mode 100644 index 
0000000..4fd0658 Binary files /dev/null and b/docs/public/chat_mode.png differ diff --git a/docs/public/email_mode.png b/docs/public/email_mode.png new file mode 100644 index 0000000..53e4cf1 Binary files /dev/null and b/docs/public/email_mode.png differ diff --git a/docs/src/content/docs/agents/examples/api-agent.mdx b/docs/src/content/docs/cookbook/examples/api-agent.md similarity index 100% rename from docs/src/content/docs/agents/examples/api-agent.mdx rename to docs/src/content/docs/cookbook/examples/api-agent.md diff --git a/docs/src/content/docs/cookbook/examples/chat-chainlit-app.md b/docs/src/content/docs/cookbook/examples/chat-chainlit-app.md new file mode 100644 index 0000000..d313b46 --- /dev/null +++ b/docs/src/content/docs/cookbook/examples/chat-chainlit-app.md @@ -0,0 +1,63 @@ +--- +title: Chat Chainlit App with Multi-Agent Orchestrator +description: How to set up a Chainlit App using Multi-Agent Orchestrator +--- + +This example demonstrates how to build a chat application using Chainlit and the Multi-Agent Orchestrator. It showcases a system with three specialized agents (Tech, Travel, and Health) working together through a streaming-enabled chat interface. + +## Key Features +- Streaming responses using Chainlit's real-time capabilities +- Integration with multiple agent types (Bedrock and Ollama) +- Custom classifier configuration using Claude 3 Haiku +- Session management for user interactions +- Complete chat history handling + +## Quick Start +```bash +# Clone the repository +git clone https://github.com/awslabs/multi-agent-orchestrator.git +cd multi-agent-orchestrator/examples/chat-chainlit-app + +# Install dependencies +pip install -r requirements.txt + +# Run the application +python app.py +``` + +## Implementation Details + +### Components +1. **Main Application** (`app.py`) + - Orchestrator setup with custom Bedrock classifier + - Chainlit event handlers for chat management + - Streaming response handling + +2. 
**Agent Configuration** (`agents.py`) + - Tech Agent: Uses Claude 3 Sonnet via Bedrock + - Travel Agent: Uses Claude 3 Sonnet via Bedrock + - Health Agent: Uses Ollama with Llama 3.1 + +3. **Custom Integration** (`ollamaAgent.py`) + - Custom implementation for Ollama integration + - Streaming support for real-time responses + +## Usage Notes +- The application creates unique user and session IDs for each chat session +- Responses are streamed in real-time using Chainlit's streaming capabilities +- The system automatically routes queries to the most appropriate agent +- Complete chat history is maintained throughout the session + +## Example Interaction +```plaintext +User: "What are the latest trends in AI?" +→ Routed to Tech Agent + +User: "Plan a trip to Paris" +→ Routed to Travel Agent + +User: "Recommend a workout routine" +→ Routed to Health Agent +``` + +Ready to build your own multi-agent chat application? Check out the complete [source code](https://github.com/awslabs/multi-agent-orchestrator/tree/main/examples/chat-chainlit-app) in our GitHub repository. diff --git a/docs/src/content/docs/deployment/demo-web-app.md b/docs/src/content/docs/cookbook/examples/chat-demo-app.md similarity index 95% rename from docs/src/content/docs/deployment/demo-web-app.md rename to docs/src/content/docs/cookbook/examples/chat-demo-app.md index 25d9510..27cb571 100644 --- a/docs/src/content/docs/deployment/demo-web-app.md +++ b/docs/src/content/docs/cookbook/examples/chat-demo-app.md @@ -140,4 +140,6 @@ By deploying this demo web app, you can interact with your Multi-Agent Orchestra ## ⚠️ Disclamer This demo application is intended solely for demonstration purposes. It is not designed for handling, storing, or processing any kind of Personally Identifiable Information (PII) or personal data. Users are strongly advised not to enter, upload, or use any PII or personal data within this application. 
Any use of PII or personal data is at the user's own risk and the developers of this application shall not be held responsible for any data breaches, misuse, or any other related issues. Please ensure that all data used in this demo is non-sensitive and anonymized. -For production usage, it is crucial to implement proper security measures to protect PII and personal data. This includes obtaining proper permissions from users, utilizing encryption for data both in transit and at rest, and adhering to industry standards and regulations to maximize security. Failure to do so may result in data breaches and other serious security issues. \ No newline at end of file +For production usage, it is crucial to implement proper security measures to protect PII and personal data. This includes obtaining proper permissions from users, utilizing encryption for data both in transit and at rest, and adhering to industry standards and regulations to maximize security. Failure to do so may result in data breaches and other serious security issues. + +Ready to build your own multi-agent chat application? Check out the complete [source code](https://github.com/awslabs/multi-agent-orchestrator/tree/main/examples/chat-demo-app) in our GitHub repository. diff --git a/docs/src/content/docs/cookbook/examples/ecommerce-support-simulator.md b/docs/src/content/docs/cookbook/examples/ecommerce-support-simulator.md new file mode 100644 index 0000000..99e3687 --- /dev/null +++ b/docs/src/content/docs/cookbook/examples/ecommerce-support-simulator.md @@ -0,0 +1,231 @@ +--- +title: AI-Powered E-commerce Support Simulator +description: How to deploy the demo AI-Powered E-commerce Support Simulator +--- + +This project demonstrates the practical application of AI agents and human-in-the-loop interactions in an e-commerce support context. It showcases how AI can handle customer queries efficiently while seamlessly integrating human support when needed. 
+ +## Overview + +The AI-Powered E-commerce Support Simulator is designed to showcase a sophisticated customer support system that combines AI agents with human support. It demonstrates how AI can handle routine queries automatically while routing complex issues to human agents, providing a comprehensive support experience. + +## Features + +- AI-powered response generation for common queries +- Intelligent routing of complex issues to human support +- Real-time chat functionality +- Email-style communication option + +## UI Modes + +### Chat Mode + +The Chat Mode provides a real-time conversation interface, simulating instant messaging between customers and the support system. It features: + +- Separate chat windows for customer and support perspectives +- Real-time message updates +- Automatic scrolling to the latest message + +![Chat Mode Screenshot](/multi-agent-orchestrator/chat_mode.png) + +### Email Mode + +The Email Mode simulates asynchronous email communication. It includes: + +- Email composition interfaces for both customer and support +- Pre-defined email templates for common scenarios +- Response viewing areas for both parties + +![Email Mode Screenshot](/multi-agent-orchestrator/email_mode.png) + +## Mock Data + +The project includes a `mock_data.json` file for testing and demonstration purposes. This file contains sample data that simulates various customer scenarios, product information, and order details. + +To view and use the mock data: + +1. Navigate to the `public` directory in the project. +2. Open the `mock_data.json` file to view its contents. +3. Use the provided data to test different support scenarios and observe how the system handles various queries. + +## AI and Human Interaction + +This simulator demonstrates the seamless integration of AI agents and human support: + +- Automated Handling: AI agents automatically process and respond to common or straightforward queries. 
+- Human Routing: Complex or sensitive issues are identified and routed to human support agents. +- Customer Notification: When a query is routed to human support, the customer receives an automatic confirmation. +- Support Interface: The support side of the interface allows human agents to see which messages require their attention and respond accordingly. +- Handoff Visibility: Users can observe when a query is handled by AI and when it's transferred to a human agent. + +## Getting Started + +[TODO] + +This simulator serves as a practical example of how AI and human support can be integrated effectively in a customer service environment. It demonstrates the potential for enhancing efficiency while maintaining the ability to provide personalized, human touch when necessary.# AI-Powered E-commerce Support Simulator + +A demonstration of how AI agents and human support can work together in an e-commerce customer service environment. This project showcases intelligent query routing, multi-agent collaboration, and seamless human integration for complex support scenarios. + +## 🎯 Key Features + +- Multi-agent AI orchestration +- Real-time and asynchronous communication modes +- Integration with human support workflow +- Tool-augmented AI interactions +- Production-ready AWS architecture +- Mock data for realistic scenarios + +## 🏗️ System Architecture + +### Agent Architecture + +![Agents](/multi-agent-orchestrator/ai_e-commerce_support_system.png) + +The system employs three specialized agents: + +#### 1. Order Management Agent (Claude 3 Sonnet) +- 🎯 **Purpose**: Handles order-related inquiries +- 🛠️ **Tools**: + - `orderlookup`: Retrieves order details + - `shipmenttracker`: Tracks shipping status + - `returnprocessor`: Manages returns +- ✨ **Capabilities**: + - Real-time order tracking + - Return processing + - Refund handling + +#### 2. 
Product Information Agent (Claude 3 Haiku) +- 🎯 **Purpose**: Product information and specifications +- 🧠 **Knowledge Base**: Integrated product database +- ✨ **Capabilities**: + - Product specifications + - Compatibility checking + - Availability information + +#### 3. Human Agent +- 🎯 **Purpose**: Complex case handling and oversight +- ✨ **Capabilities**: + - Complex complaint resolution + - Critical decision oversight + - AI response verification + +### AWS Infrastructure + +![Infrastructure](/multi-agent-orchestrator/ai-powered_e-commerce_support_simulator.png) + +#### Core Components +- 🌐 **Frontend**: React + CloudFront +- 🔌 **API**: AppSync GraphQL +- 📨 **Messaging**: SQS queues +- ⚡ **Processing**: Lambda functions +- 💾 **Storage**: DynamoDB + S3 +- 🔐 **Auth**: Cognito + +## 💬 Communication Modes + +### Real-Time Chat +![Chat Mode](/multi-agent-orchestrator/chat_mode.png) +- Instant messaging interface +- Real-time response streaming +- Automatic routing + +### Email-Style +![Email Mode](/multi-agent-orchestrator/email_mode.png) +- Asynchronous communication +- Template-based responses +- Structured conversations + +## 🛠️ Mock System Integration + +### Mock Data Structure +The `mock_data.json` provides realistic test data: +```json +{ + "orders": {...}, + "products": {...}, + "shipping": {...} +} +``` + +### Tool Integration +- Order management tools use mock database +- Shipment tracking simulates real-time updates +- Return processing demonstrates workflow + +## 🚀 Deployment Guide + +### Prerequisites +- AWS account with permissions +- AWS CLI configured +- Node.js and npm +- AWS CDK CLI + +### Quick Start +```bash +# Clone repository +git clone https://github.com/awslabs/multi-agent-orchestrator.git +cd multi-agent-orchestrator/examples/ecommerce-support-simulator + +# Install and deploy +npm install +cdk bootstrap +cdk deploy + +# Create user +aws cognito-idp admin-create-user \ + --user-pool-id your-region_xxxxxxx \ + --username your@email.com \ + 
--user-attributes Name=email,Value=your@email.com \ + --temporary-password "MyChallengingPassword" \ + --message-action SUPPRESS \ + --region your-region +``` + +## 🔍 Demo Scenarios + +1. **Order Management** + - Order status inquiries + - Shipment tracking + - Return requests + +2. **Product Support** + - Product specifications + - Compatibility checks + - Availability queries + +3. **Complex Cases** + - Multi-step resolutions + - Human escalation + - Critical decisions + +## 🧹 Cleanup +```bash +cdk destroy +``` + +## 🔧 Troubleshooting + +Common issues and solutions: +1. **Deployment Failures** + - Verify AWS credentials + - Check permissions + - Review CloudFormation logs + +2. **Runtime Issues** + - Validate mock data format + - Check queue configurations + - Verify Lambda logs + +## ⚠️ Disclaimer + +This demo application is intended solely for demonstration purposes. It is not designed for handling, storing, or processing any kind of Personally Identifiable Information (PII) or personal data. Users are strongly advised not to enter, upload, or use any PII or personal data within this application. Any use of PII or personal data is at the user's own risk and the developers of this application shall not be held responsible for any data breaches, misuse, or any other related issues. Please ensure that all data used in this demo is non-sensitive and anonymized. + +For production usage, it is crucial to implement proper security measures to protect PII and personal data. This includes obtaining proper permissions from users, utilizing encryption for data both in transit and at rest, and adhering to industry standards and regulations to maximize security. Failure to do so may result in data breaches and other serious security issues. 
+## 📚 Additional Resources + +- [Multi-Agent Orchestrator Documentation](https://github.com/awslabs/multi-agent-orchestrator) +- [AWS AppSync Documentation](https://docs.aws.amazon.com/appsync) +- [Claude API Documentation](https://docs.anthropic.com/claude/reference) + + +Ready to build your own multi-agent chat application? Check out the complete [source code](https://github.com/awslabs/multi-agent-orchestrator/tree/main/examples/ecommerce-support-simulator) in our GitHub repository. diff --git a/docs/src/content/docs/cookbook/examples/fast-api-streaming.md b/docs/src/content/docs/cookbook/examples/fast-api-streaming.md new file mode 100644 index 0000000..cc354c4 --- /dev/null +++ b/docs/src/content/docs/cookbook/examples/fast-api-streaming.md @@ -0,0 +1,64 @@ +--- +title: FastAPI Streaming +description: How to use FastAPI Streaming with Multi-Agent Orchestrator +--- + +This example demonstrates how to implement streaming responses with the Multi-Agent Orchestrator using FastAPI. It shows how to build a simple API that streams responses from multiple AI agents in real-time. 
+ +## Features +- Real-time streaming responses using FastAPI's `StreamingResponse` +- Custom streaming handler implementation +- Multiple agent support (Tech and Health agents) +- Queue-based token streaming +- CORS-enabled API endpoint + +## Quick Start +```bash +# Install dependencies +pip install "fastapi[all]" multi-agent-orchestrator + +# Run the server +uvicorn app:app --reload +``` + +## API Endpoint + +```bash +POST /stream_chat/ +``` + +Request body: +```json +{ + "content": "your question here", + "user_id": "user123", + "session_id": "session456" +} +``` + +## Implementation Highlights +- Uses FastAPI's event streaming capabilities +- Custom callback handler for real-time token streaming +- Thread-safe queue implementation for token management +- Configurable orchestrator with multiple specialized agents +- Error handling and proper stream closure + +## Example Usage +```python +import requests + +response = requests.post( + 'http://localhost:8000/stream_chat/', + json={ + 'content': 'What are the latest AI trends?', + 'user_id': 'user123', + 'session_id': 'session456' + }, + stream=True +) + +for chunk in response.iter_content(): + print(chunk.decode(), end='', flush=True) +``` + +Ready to build your own multi-agent chat application? Check out the complete [source code](https://github.com/awslabs/multi-agent-orchestrator/tree/main/examples/fast-api-streaming) in our GitHub repository. 
diff --git a/docs/src/content/docs/agents/examples/ollama-agent.mdx b/docs/src/content/docs/cookbook/examples/ollama-agent.md similarity index 100% rename from docs/src/content/docs/agents/examples/ollama-agent.mdx rename to docs/src/content/docs/cookbook/examples/ollama-agent.md diff --git a/docs/src/content/docs/classifiers/examples/ollama-classifier.mdx b/docs/src/content/docs/cookbook/examples/ollama-classifier.mdx similarity index 100% rename from docs/src/content/docs/classifiers/examples/ollama-classifier.mdx rename to docs/src/content/docs/cookbook/examples/ollama-classifier.mdx diff --git a/docs/src/content/docs/cookbook/examples/python-local-demo.md b/docs/src/content/docs/cookbook/examples/python-local-demo.md new file mode 100644 index 0000000..e6ab857 --- /dev/null +++ b/docs/src/content/docs/cookbook/examples/python-local-demo.md @@ -0,0 +1,112 @@ +--- +title: Python Local Demo +description: How to run the Multi-Agent Orchestrator System locally using Python +--- + + +## Prerequisites +- Python 3.12 or later +- AWS account with appropriate permissions +- Basic familiarity with Python async/await patterns + +## Quick Setup + +1. Create a new project: +```bash +mkdir test_multi_agent_orchestrator +cd test_multi_agent_orchestrator +python -m venv venv +source venv/bin/activate # On Windows use `venv\Scripts\activate` +``` + +2. Install dependencies: +```bash +pip install multi-agent-orchestrator +``` + +## Implementation + +1. Create a new file named `quickstart.py`: + +2. 
Initialize the orchestrator: +```python +from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator, OrchestratorConfig +from multi_agent_orchestrator.agents import (BedrockLLMAgent, + BedrockLLMAgentOptions, + AgentResponse, + AgentCallbacks) +from multi_agent_orchestrator.types import ConversationMessage, ParticipantRole + +orchestrator = MultiAgentOrchestrator(options=OrchestratorConfig( + LOG_AGENT_CHAT=True, + LOG_CLASSIFIER_CHAT=True, + LOG_CLASSIFIER_RAW_OUTPUT=True, + LOG_CLASSIFIER_OUTPUT=True, + LOG_EXECUTION_TIMES=True, + MAX_RETRIES=3, + USE_DEFAULT_AGENT_IF_NONE_IDENTIFIED=True, + MAX_MESSAGE_PAIRS_PER_AGENT=10 +)) +``` + +3. Set up agent callbacks and add an agent: +```python +class BedrockLLMAgentCallbacks(AgentCallbacks): + def on_llm_new_token(self, token: str) -> None: + # handle response streaming here + print(token, end='', flush=True) + +tech_agent = BedrockLLMAgent(BedrockLLMAgentOptions( + name="Tech Agent", + streaming=True, + description="Specializes in technology areas including software development, hardware, AI, \ + cybersecurity, blockchain, cloud computing, emerging tech innovations, and pricing/costs \ + related to technology products and services.", + model_id="anthropic.claude-3-sonnet-20240229-v1:0", + callbacks=BedrockLLMAgentCallbacks() +)) +orchestrator.add_agent(tech_agent) +``` + +4. Implement the main logic: +```python +async def handle_request(_orchestrator: MultiAgentOrchestrator, _user_input: str, _user_id: str, _session_id: str): + response: AgentResponse = await _orchestrator.route_request(_user_input, _user_id, _session_id) + print("\nMetadata:") + print(f"Selected Agent: {response.metadata.agent_name}") + if response.streaming: + print('Response:', response.output.content[0]['text']) + else: + print('Response:', response.output.content[0]['text']) + +if __name__ == "__main__": + USER_ID = "user123" + SESSION_ID = str(uuid.uuid4()) + print("Welcome to the interactive Multi-Agent system. 
Type 'quit' to exit.") + while True: + user_input = input("\nYou: ").strip() + if user_input.lower() == 'quit': + print("Exiting the program. Goodbye!") + sys.exit() + asyncio.run(handle_request(orchestrator, user_input, USER_ID, SESSION_ID)) +``` + +5. Run the application: +```bash +python quickstart.py +``` + +## Implementation Notes +- Implements streaming responses by default +- Uses default Bedrock Classifier with `anthropic.claude-3-5-sonnet-20240620-v1:0` +- Includes interactive command-line interface +- Handles session management with UUID generation + +## Next Steps +- Add additional specialized agents +- Implement persistent storage +- Customize the classifier configuration +- Add error handling and retry logic + + +Ready to build your own multi-agent chat application? Check out the complete [source code](https://github.com/awslabs/multi-agent-orchestrator/tree/main/examples/python-demo) in our GitHub repository. diff --git a/docs/src/content/docs/cookbook/examples/typescript-local-demo.md b/docs/src/content/docs/cookbook/examples/typescript-local-demo.md new file mode 100644 index 0000000..e412404 --- /dev/null +++ b/docs/src/content/docs/cookbook/examples/typescript-local-demo.md @@ -0,0 +1,105 @@ +--- +title: TypeScript Local Demo +description: How to run the Multi-Agent Orchestrator System locally using TypeScript +--- + +## Prerequisites +- Node.js and npm installed +- AWS account with appropriate permissions +- Basic familiarity with TypeScript and async/await patterns + +## Quick Setup + +1. Create a new project: +```bash +mkdir test_multi_agent_orchestrator +cd test_multi_agent_orchestrator +npm init +``` + +2. Install dependencies: +```bash +npm install multi-agent-orchestrator +``` + +## Implementation + +1. Create a new file named `quickstart.ts`: + +2. 
Initialize the orchestrator: +```typescript +import { MultiAgentOrchestrator } from "multi-agent-orchestrator"; + +const orchestrator = new MultiAgentOrchestrator({ + config: { + LOG_AGENT_CHAT: true, + LOG_CLASSIFIER_CHAT: true, + LOG_CLASSIFIER_RAW_OUTPUT: false, + LOG_CLASSIFIER_OUTPUT: true, + LOG_EXECUTION_TIMES: true, + } +}); +``` + +3. Add specialized agents: +```typescript +import { BedrockLLMAgent } from "multi-agent-orchestrator"; + +orchestrator.addAgent( + new BedrockLLMAgent({ + name: "Tech Agent", + description: "Specializes in technology areas including software development, hardware, AI, cybersecurity, blockchain, cloud computing, emerging tech innovations, and pricing/costs related to technology products and services.", + }) +); + +orchestrator.addAgent( + new BedrockLLMAgent({ + name: "Health Agent", + description: "Focuses on health and medical topics such as general wellness, nutrition, diseases, treatments, mental health, fitness, healthcare systems, and medical terminology or concepts.", + }) +); +``` + +4. Implement the main logic: +```typescript +const userId = "quickstart-user"; +const sessionId = "quickstart-session"; +const query = "What are the latest trends in AI?"; +console.log(`\nUser Query: ${query}`); + +async function main() { + try { + const response = await orchestrator.routeRequest(query, userId, sessionId); + console.log("\n** RESPONSE ** \n"); + console.log(`> Agent ID: ${response.metadata.agentId}`); + console.log(`> Agent Name: ${response.metadata.agentName}`); + console.log(`> User Input: ${response.metadata.userInput}`); + console.log(`> User ID: ${response.metadata.userId}`); + console.log(`> Session ID: ${response.metadata.sessionId}`); + console.log(`> Additional Parameters:`, response.metadata.additionalParams); + console.log(`\n> Response: ${response.output}`); + } catch (error) { + console.error("An error occurred:", error); + } +} + +main(); +``` + +5. 
Run the application: +```bash +npx ts-node quickstart.ts +``` + +## Implementation Notes +- Uses default Bedrock Classifier with `anthropic.claude-3-5-sonnet-20240620-v1:0` +- Utilizes Bedrock LLM Agent with `anthropic.claude-3-haiku-20240307-v1:0` +- Implements in-memory storage by default + +## Next Steps +- Add additional specialized agents +- Implement persistent storage with DynamoDB +- Add custom error handling +- Implement streaming responses + +Ready to build your own multi-agent chat application? Check out the complete [source code](https://github.com/awslabs/multi-agent-orchestrator/tree/main/examples/local-demo) in our GitHub repository. diff --git a/docs/src/content/docs/deployment/aws-lambda-ts.md b/docs/src/content/docs/cookbook/lambda/aws-lambda-nodejs.md similarity index 97% rename from docs/src/content/docs/deployment/aws-lambda-ts.md rename to docs/src/content/docs/cookbook/lambda/aws-lambda-nodejs.md index bd7514e..c68c376 100644 --- a/docs/src/content/docs/deployment/aws-lambda-ts.md +++ b/docs/src/content/docs/cookbook/lambda/aws-lambda-nodejs.md @@ -1,5 +1,5 @@ --- -title: AWS Lambda JavaScript with Multi-Agent Orchestrator +title: AWS Lambda NodeJs with Multi-Agent Orchestrator description: How to set up the Multi-Agent Orchestrator System for AWS Lambda using JavaScript --- diff --git a/docs/src/content/docs/deployment/aws-lambda-py.md b/docs/src/content/docs/cookbook/lambda/aws-lambda-python.md similarity index 100% rename from docs/src/content/docs/deployment/aws-lambda-py.md rename to docs/src/content/docs/cookbook/lambda/aws-lambda-python.md diff --git a/docs/src/content/docs/advanced-features/agent-overlap.md b/docs/src/content/docs/cookbook/monitoring/agent-overlap.md similarity index 100% rename from docs/src/content/docs/advanced-features/agent-overlap.md rename to docs/src/content/docs/cookbook/monitoring/agent-overlap.md diff --git a/docs/src/content/docs/advanced-features/logging.mdx 
b/docs/src/content/docs/cookbook/monitoring/logging.mdx similarity index 100% rename from docs/src/content/docs/advanced-features/logging.mdx rename to docs/src/content/docs/cookbook/monitoring/logging.mdx diff --git a/docs/src/content/docs/cookbook/patterns/cost-efficient.md b/docs/src/content/docs/cookbook/patterns/cost-efficient.md new file mode 100644 index 0000000..a1cf420 --- /dev/null +++ b/docs/src/content/docs/cookbook/patterns/cost-efficient.md @@ -0,0 +1,57 @@ +--- +title: Cost-Efficient Routing Pattern +description: Cost-Efficient Routing Pattern using the Multi-Agent Orchestrator framework +--- + + +The Multi-Agent Orchestrator can intelligently route queries to the most cost-effective agent based on task complexity, optimizing resource utilization and reducing operational costs. + +## How It Works + +1. **Task Complexity Analysis** + - The classifier assesses incoming query complexity + - Considers factors like required expertise, computational intensity, and expected response time + - Makes routing decisions based on task requirements + +2. **Agent Cost Tiers** + - Agents are categorized into different cost tiers: + - Low-cost: General-purpose models for simple tasks + - Mid-tier: Balanced performance and cost + - High-cost: Specialized expert models for complex tasks + +3. 
**Dynamic Routing** + - Simple queries route to cheaper models + - Complex tasks route to specialized agents + - Automatic routing based on query analysis + +## Implementation Example + +```typescript +// Configure low-cost agent for simple queries +const basicAgent = new BedrockLLMAgent({ + name: "Basic Agent", + modelId: "mistral.mistral-small-2402-v1:0", + description: "Handles simple queries and basic information retrieval", + streaming: true, + inferenceConfig: { temperature: 0.0 } +}); + +// Configure expert agent for complex tasks +const expertAgent = new BedrockLLMAgent({ + name: "Expert Agent", + modelId: "anthropic.claude-3-sonnet-20240229-v1:0", + description: "Handles complex analysis and specialized tasks", + streaming: true, + inferenceConfig: { temperature: 0.0 } +}); + +// Add agents to orchestrator +orchestrator.addAgent(basicAgent); +orchestrator.addAgent(expertAgent); +``` + +## Benefits +- Optimal resource utilization +- Cost reduction for simple tasks +- Improved response quality for complex queries +- Efficient scaling based on query complexity \ No newline at end of file diff --git a/docs/src/content/docs/cookbook/patterns/multi-lingual.md b/docs/src/content/docs/cookbook/patterns/multi-lingual.md new file mode 100644 index 0000000..1456cc8 --- /dev/null +++ b/docs/src/content/docs/cookbook/patterns/multi-lingual.md @@ -0,0 +1,66 @@ +--- +title: Multi-lingual Routing Pattern +description: Multi-lingual Routing Pattern using the Multi-Agent Orchestrator framework +--- + + +By integrating language-specific agents, the Multi-Agent Orchestrator can provide multi-lingual support, enabling users to interact with the system in their preferred language while maintaining consistent experiences. + +## Key Components + +1. **Language Detection** + - Classifier identifies query language + - Routes to appropriate language-specific agent + - Maintains context across languages + +2. 
**Language-Specific Agents** + - Dedicated agents for each supported language + - Specialized in language-specific responses + - Consistent response quality across languages + +3. **Dynamic Language Routing** + - Automatic routing based on detected language + - Seamless language switching + - Maintains conversation context + +## Implementation Example + +```typescript +// French language agent +orchestrator.addAgent( + new BedrockLLMAgent({ + name: "Text Summarization Agent for French Language", + modelId: "anthropic.claude-3-haiku-20240307-v1:0", + description: "This is a very simple text summarization agent for french language.", + streaming: true, + inferenceConfig: { + temperature: 0.0, + }, + }) +); + +// English language agent +orchestrator.addAgent( + new BedrockLLMAgent({ + name: "Text Summarization Agent English Language", + modelId: "mistral.mistral-small-2402-v1:0", + description: "This is a very simple text summarization agent for english language.", + streaming: true, + inferenceConfig: { + temperature: 0.0, + } + }) +); +``` + +## Implementation Notes +- Models shown are for illustration +- Any suitable LLM can be substituted +- Principle remains consistent across different models +- Configure based on language-specific requirements + +## Benefits +- Native language support +- Consistent user experience +- Scalable language coverage +- Maintainable language-specific logic \ No newline at end of file diff --git a/docs/src/content/docs/advanced-features/math-tool-use.md b/docs/src/content/docs/cookbook/tools/math-operations.md similarity index 100% rename from docs/src/content/docs/advanced-features/math-tool-use.md rename to docs/src/content/docs/cookbook/tools/math-operations.md diff --git a/docs/src/content/docs/advanced-features/weather-tool-use.md b/docs/src/content/docs/cookbook/tools/weather-api.md similarity index 100% rename from docs/src/content/docs/advanced-features/weather-tool-use.md rename to 
docs/src/content/docs/cookbook/tools/weather-api.md diff --git a/docs/src/content/docs/deployment/local.mdx b/docs/src/content/docs/deployment/local.mdx deleted file mode 100644 index ac9a29c..0000000 --- a/docs/src/content/docs/deployment/local.mdx +++ /dev/null @@ -1,276 +0,0 @@ ---- -title: Local Execution -description: How to run the Multi-Agent Orchestrator System locally ---- -import { Tabs, TabItem } from '@astrojs/starlight/components'; - - -## Overview - -Running the Multi-Agent Orchestrator System locally is useful for development, testing, and debugging. This guide will walk you through setting up and running the orchestrator on your local machine. - -> 💁 Ensure you have Node.js and npm installed (for TypeScript) or Python installed (for Python) on your development environment before proceeding. - -## Prerequisites - -1. Create a new project: - - - - ```bash - mkdir test_multi_agent_orchestrator - cd test_multi_agent_orchestrator - npm init - ``` - Follow the steps to generate a `package.json` file. - - - ```bash - mkdir test_multi_agent_orchestrator - cd test_multi_agent_orchestrator - # Optional: Set up a virtual environment - python -m venv venv - source venv/bin/activate # On Windows use `venv\Scripts\activate` - ``` - - - -2. Authenticate with your AWS account - -This quickstart demonstrates the use of Amazon Bedrock for both classification and agent responses. - -By default, the framework is configured as follows: - - Classifier: Uses the **[Bedrock Classifier](/multi-agent-orchestrator/classifiers/built-in/bedrock-classifier/)** implementation with `anthropic.claude-3-5-sonnet-20240620-v1:0` - - Agent: Utilizes the **[Bedrock LLM Agent](/multi-agent-orchestrator/agents/built-in/bedrock-llm-agent)** with `anthropic.claude-3-haiku-20240307-v1:0` - -
- -> **Important** -> -> These are merely default settings and can be easily changed to suit your needs or preferences. - -
- -You have the flexibility to: - - Change the classifier model or implementation - - Change the agent model or implementation - - Use any other compatible models available through Amazon Bedrock - -Ensure you have [requested access](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html) to the models you intend to use through the AWS console. - -
- -> **To customize the model selection**: -> - For the classifier, refer to [our guide](/multi-agent-orchestrator/classifiers/overview) on configuring the classifier. -> - For the agent, refer to our guide on configuring [agents](/multi-agent-orchestrator/agents/overview). - -## 🚀 Get Started! - -1. Install the Multi-Agent Orchestrator framework in your project: - - - - ```bash - npm install multi-agent-orchestrator - ``` - - - ```bash - pip install multi-agent-orchestrator - ``` - - - -2. Create a new file for your quickstart code: - - - - Create a file named `quickstart.ts`. - - - Create a file named `quickstart.py`. - - - -3. Create an Orchestrator: - - - - ```typescript - import { MultiAgentOrchestrator } from "multi-agent-orchestrator"; - const orchestrator = new MultiAgentOrchestrator({ - config: { - LOG_AGENT_CHAT: true, - LOG_CLASSIFIER_CHAT: true, - LOG_CLASSIFIER_RAW_OUTPUT: false, - LOG_CLASSIFIER_OUTPUT: true, - LOG_EXECUTION_TIMES: true, - } - }); - ``` - - - ```python - import uuid - import asyncio - from typing import Optional, List, Dict, Any - import json - import sys - from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator, OrchestratorConfig - from multi_agent_orchestrator.agents import (BedrockLLMAgent, - BedrockLLMAgentOptions, - AgentResponse, - AgentCallbacks) - from multi_agent_orchestrator.types import ConversationMessage, ParticipantRole - - orchestrator = MultiAgentOrchestrator(options=OrchestratorConfig( - LOG_AGENT_CHAT=True, - LOG_CLASSIFIER_CHAT=True, - LOG_CLASSIFIER_RAW_OUTPUT=True, - LOG_CLASSIFIER_OUTPUT=True, - LOG_EXECUTION_TIMES=True, - MAX_RETRIES=3, - USE_DEFAULT_AGENT_IF_NONE_IDENTIFIED=True, - MAX_MESSAGE_PAIRS_PER_AGENT=10 - )) - ``` - - - -4. 
Add Agents: - - - - ```typescript - import { BedrockLLMAgent } from "multi-agent-orchestrator"; - orchestrator.addAgent( - new BedrockLLMAgent({ - name: "Tech Agent", - description: "Specializes in technology areas including software development, hardware, AI, cybersecurity, blockchain, cloud computing, emerging tech innovations, and pricing/costs related to technology products and services.", - }) - ); - - orchestrator.addAgent( - new BedrockLLMAgent({ - name: "Health Agent", - description: "Focuses on health and medical topics such as general wellness, nutrition, diseases, treatments, mental health, fitness, healthcare systems, and medical terminology or concepts.", - }) - ); - ``` - - - ```python - class BedrockLLMAgentCallbacks(AgentCallbacks): - def on_llm_new_token(self, token: str) -> None: - # handle response streaming here - print(token, end='', flush=True) - - tech_agent = BedrockLLMAgent(BedrockLLMAgentOptions( - name="Tech Agent", - streaming=True, - description="Specializes in technology areas including software development, hardware, AI, \ - cybersecurity, blockchain, cloud computing, emerging tech innovations, and pricing/costs \ - related to technology products and services.", - model_id="anthropic.claude-3-sonnet-20240229-v1:0", - callbacks=BedrockLLMAgentCallbacks() - )) - orchestrator.add_agent(tech_agent) - ``` - - - -5. 
Send a Query: - - - - ```typescript - const userId = "quickstart-user"; - const sessionId = "quickstart-session"; - const query = "What are the latest trends in AI?"; - console.log(`\nUser Query: ${query}`); - - async function main() { - try { - const response = await orchestrator.routeRequest(query, userId, sessionId); - console.log("\n** RESPONSE ** \n"); - console.log(`> Agent ID: ${response.metadata.agentId}`); - console.log(`> Agent Name: ${response.metadata.agentName}`); - console.log(`> User Input: ${response.metadata.userInput}`); - console.log(`> User ID: ${response.metadata.userId}`); - console.log(`> Session ID: ${response.metadata.sessionId}`); - console.log( - `> Additional Parameters:`, - response.metadata.additionalParams - ); - console.log(`\n> Response: ${response.output}`); - // ... rest of the logging code ... - } catch (error) { - console.error("An error occurred:", error); - // Here you could also add more specific error handling if needed - } - } - - main(); - ``` - - - ```python - async def handle_request(_orchestrator: MultiAgentOrchestrator, _user_input: str, _user_id: str, _session_id: str): - response: AgentResponse = await _orchestrator.route_request(_user_input, _user_id, _session_id) - # Print metadata - print("\nMetadata:") - print(f"Selected Agent: {response.metadata.agent_name}") - if response.streaming: - print('Response:', response.output.content[0]['text']) - else: - print('Response:', response.output.content[0]['text']) - - if __name__ == "__main__": - USER_ID = "user123" - SESSION_ID = str(uuid.uuid4()) - print("Welcome to the interactive Multi-Agent system. Type 'quit' to exit.") - while True: - # Get user input - user_input = input("\nYou: ").strip() - if user_input.lower() == 'quit': - print("Exiting the program. 
Goodbye!") - sys.exit() - # Run the async function - asyncio.run(handle_request(orchestrator, user_input, USER_ID, SESSION_ID)) - ``` - - - -Now, let's run the quickstart script: - - - - ```bash - npx ts-node quickstart.ts - ``` - - - ```bash - python quickstart.py - ``` - - - -Congratulations! 🎉 -You've successfully set up and run your first multi-agent conversation using the Multi-Agent Orchestrator System. - -## 👨‍💻 Next Steps - -Now that you've seen the basic functionality, here are some next steps to explore: - -1. Try adding other agents from those built-in in the framework ([Bedrock LLM Agent](/multi-agent-orchestrator/agents/built-in/bedrock-llm-agent), [Amazon Lex Bot](/multi-agent-orchestrator/agents/built-in/lex-bot-agent), [Amazon Bedrock Agent](/multi-agent-orchestrator/agents/built-in/amazon-bedrock-agent), [Lambda Agent](/multi-agent-orchestrator/agents/built-in/lambda-agent), [OpenAI Agent](/multi-agent-orchestrator/agents/built-in/openai-agent)). -2. Experiment with different storage options, such as [Amazon DynamoDB](/multi-agent-orchestrator/storage/dynamodb) for persistent storage. -3. Explore the [Agent Overlap Analysis](/multi-agent-orchestrator/advanced-features/agent-overlap) feature to optimize your agent configurations. -4. Integrate the system into a web application or deploy it as an [AWS Lambda function](/multi-agent-orchestrator/deployment/aws-lambda). -5. Try adding your own [custom agents](/multi-agent-orchestrator/agents/custom-agents) by extending the `Agent` class. - -For more detailed information on these advanced features, check out our full documentation. - -## 🧹 Cleanup - -As this quickstart uses in-memory storage and local resources, there's no cleanup required. Simply stop the script when you're done experimenting. 
\ No newline at end of file diff --git a/docs/src/content/docs/use-cases/use-cases.md b/docs/src/content/docs/use-cases/use-cases.md deleted file mode 100644 index fc225b7..0000000 --- a/docs/src/content/docs/use-cases/use-cases.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Use cases -description: An overview of the use cases that are possible to implement using this framework ---- - -The Multi-Agent Orchestrator framework can enable a wide range of powerful use cases across various industries and domains. Here are some potential use cases that can benefit from this flexible and scalable framework. - -## 1. Cost efficient routing: - -The Multi-Agent Orchestrator can intelligently route queries to the most cost-effective agent based on the complexity of the task, optimizing resource utilization and reducing overall operational costs. - -Key aspects of this approach include: - -1. Task Complexity Analysis: The classifier assesses the complexity of each incoming query, considering factors like required expertise, computational intensity, and expected response time. - -2. Agent Cost Tiers: Agents are categorized into different cost tiers, ranging from low-cost, general-purpose models to high-cost, specialized expert models. - -3. Dynamic Routing: Queries are dynamically routed to the lowest-cost agent capable of handling the task effectively. Simple queries (e.g., basic information retrieval, short summaries) are directed to cheaper, less powerful models, while complex tasks are routed to more expensive, specialized agents. - -This approach ensures that expensive computational resources are used judiciously, reserving them for tasks that truly require advanced capabilities. It allows organizations to manage costs effectively while still providing high-quality responses across a wide range of query complexities. - - -## 2. 
Multi-lingual routing: -By integrating language-specific agents, the Multi-Agent Orchestrator can facilitate multi-lingual support, enabling users to interact with the system in their preferred language while maintaining consistent experiences across languages. - -Key aspects of this approach include: - -1. Language Detection: The classifier incorporates language detection capabilities to identify the language of incoming queries. -2. Language-Specific Agents: The system includes a range of agents specialized in different languages, each trained to handle queries and generate responses in their respective languages. -3. Dynamic Language Routing: Queries are automatically routed to the appropriate language-specific agent based on the detected language of the user's input. - - -**Example of Multi-lingual text summarization agents:** - -```typescript - -orchestrator.addAgent( - new BedrockLLMAgent({ - name: "Text Summarization Agent for French Language", - modelId: "anthropic.claude-3-haiku-20240307-v1:0", - description: - "This is a very simple text summarization agent for french language.", - streaming: true, - inferenceConfig: { - temperature: 0.0, - }, - }) -); - -// Add a text summarization agent -orchestrator.addAgent( - new BedrockLLMAgent({ - name: "Text Summarization Agent English Language", - modelId: "mistral.mistral-small-2402-v1:0", - description: - "This is a very simple text summarization agent for english language.", - streaming: true, - inferenceConfig: { - temperature: 0.0, - } - }) -); - -``` - -These specific models are used for illustration. Other suitable LLMs can be substituted. The principle of using language-specific agents remains consistent, -regardless of the specific LLM chosen for each language. 
- - -### Note: -While not all existing components may inherently support both cost-efficient and multi-lingual routing, the modular and extensible nature of the Multi-Agent Orchestrator framework allows for relatively straightforward expansion to incorporate these features. Developers can leverage the framework's flexibility to implement custom classifiers, agents, or additional logic to enable these advanced routing capabilities without significant architectural overhauls. \ No newline at end of file