From d58d6e88fda03c3de7924f794b55e82e7cc8f85e Mon Sep 17 00:00:00 2001 From: Corneliu CROITORU Date: Tue, 27 Aug 2024 15:29:52 +0200 Subject: [PATCH 1/3] working agent in ts --- docs/src/content/docs/general/quickstart.mdx | 1 + typescript/package-lock.json | 49 +-- typescript/package.json | 1 + .../src/agents/bedrockTranslatorAgent.ts | 168 ++++++++++ typescript/src/agents/chainAgent.ts | 101 +++++++ .../src/agents/comprehendFilterAgent.ts | 286 ++++++++++++++++++ .../src/classifiers/anthropicClassifier.ts | 4 +- .../src/classifiers/bedrockClassifier.ts | 4 +- typescript/src/utils/helpers.ts | 3 +- 9 files changed, 592 insertions(+), 25 deletions(-) create mode 100644 typescript/src/agents/bedrockTranslatorAgent.ts create mode 100644 typescript/src/agents/chainAgent.ts create mode 100644 typescript/src/agents/comprehendFilterAgent.ts diff --git a/docs/src/content/docs/general/quickstart.mdx b/docs/src/content/docs/general/quickstart.mdx index b73e933..060b2c2 100644 --- a/docs/src/content/docs/general/quickstart.mdx +++ b/docs/src/content/docs/general/quickstart.mdx @@ -162,6 +162,7 @@ Ensure you have [requested access](https://docs.aws.amazon.com/bedrock/latest/us new BedrockLLMAgent({ name: "Tech Agent", description: "Specializes in technology areas including software development, hardware, AI, cybersecurity, blockchain, cloud computing, emerging tech innovations, and pricing/costs related to technology products and services.", + streaming: true }) ); diff --git a/typescript/package-lock.json b/typescript/package-lock.json index 1dd57e4..2684afe 100644 --- a/typescript/package-lock.json +++ b/typescript/package-lock.json @@ -26,6 +26,7 @@ }, "devDependencies": { "@types/jest": "^29.5.12", + "@types/mocha": "^10.0.7", "@typescript-eslint/eslint-plugin": "^7.17.0", "@typescript-eslint/parser": "^7.17.0", "aws-sdk-client-mock": "^4.0.1", @@ -2313,17 +2314,19 @@ } }, "node_modules/@smithy/core": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/@smithy/core/-/core-2.3.1.tgz", - "integrity": "sha512-BC7VMXx/1BCmRPCVzzn4HGWAtsrb7/0758EtwOGFJQrlSwJBEjCcDLNZLFoL/68JexYa2s+KmgL/UfmXdG6v1w==", + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/@smithy/core/-/core-2.4.0.tgz", + "integrity": "sha512-cHXq+FneIF/KJbt4q4pjN186+Jf4ZB0ZOqEaZMBhT79srEyGDDBV31NqBRBjazz8ppQ1bJbDJMY9ba5wKFV36w==", "dependencies": { "@smithy/middleware-endpoint": "^3.1.0", - "@smithy/middleware-retry": "^3.0.13", + "@smithy/middleware-retry": "^3.0.15", "@smithy/middleware-serde": "^3.0.3", "@smithy/protocol-http": "^4.1.0", - "@smithy/smithy-client": "^3.1.11", + "@smithy/smithy-client": "^3.2.0", "@smithy/types": "^3.3.0", + "@smithy/util-body-length-browser": "^3.0.0", "@smithy/util-middleware": "^3.0.3", + "@smithy/util-utf8": "^3.0.0", "tslib": "^2.6.2" }, "engines": { @@ -2484,14 +2487,14 @@ } }, "node_modules/@smithy/middleware-retry": { - "version": "3.0.13", - "resolved": "https://registry.npmjs.org/@smithy/middleware-retry/-/middleware-retry-3.0.13.tgz", - "integrity": "sha512-zvCLfaRYCaUmjbF2yxShGZdolSHft7NNCTA28HVN9hKcEbOH+g5irr1X9s+in8EpambclGnevZY4A3lYpvDCFw==", + "version": "3.0.15", + "resolved": "https://registry.npmjs.org/@smithy/middleware-retry/-/middleware-retry-3.0.15.tgz", + "integrity": "sha512-iTMedvNt1ApdvkaoE8aSDuwaoc+BhvHqttbA/FO4Ty+y/S5hW6Ci/CTScG7vam4RYJWZxdTElc3MEfHRVH6cgQ==", "dependencies": { "@smithy/node-config-provider": "^3.1.4", "@smithy/protocol-http": "^4.1.0", "@smithy/service-error-classification": "^3.0.3", - "@smithy/smithy-client": "^3.1.11", + 
"@smithy/smithy-client": "^3.2.0", "@smithy/types": "^3.3.0", "@smithy/util-middleware": "^3.0.3", "@smithy/util-retry": "^3.0.3", @@ -2646,9 +2649,9 @@ } }, "node_modules/@smithy/smithy-client": { - "version": "3.1.11", - "resolved": "https://registry.npmjs.org/@smithy/smithy-client/-/smithy-client-3.1.11.tgz", - "integrity": "sha512-l0BpyYkciNyMaS+PnFFz4aO5sBcXvGLoJd7mX9xrMBIm2nIQBVvYgp2ZpPDMzwjKCavsXu06iuCm0F6ZJZc6yQ==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@smithy/smithy-client/-/smithy-client-3.2.0.tgz", + "integrity": "sha512-pDbtxs8WOhJLJSeaF/eAbPgXg4VVYFlRcL/zoNYA5WbG3wBL06CHtBSg53ppkttDpAJ/hdiede+xApip1CwSLw==", "dependencies": { "@smithy/middleware-endpoint": "^3.1.0", "@smithy/middleware-stack": "^3.0.3", @@ -2738,12 +2741,12 @@ } }, "node_modules/@smithy/util-defaults-mode-browser": { - "version": "3.0.13", - "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-3.0.13.tgz", - "integrity": "sha512-ZIRSUsnnMRStOP6OKtW+gCSiVFkwnfQF2xtf32QKAbHR6ACjhbAybDvry+3L5qQYdh3H6+7yD/AiUE45n8mTTw==", + "version": "3.0.15", + "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-3.0.15.tgz", + "integrity": "sha512-FZ4Psa3vjp8kOXcd3HJOiDPBCWtiilLl57r0cnNtq/Ga9RSDrM5ERL6xt+tO43+2af6Pn5Yp92x2n5vPuduNfg==", "dependencies": { "@smithy/property-provider": "^3.1.3", - "@smithy/smithy-client": "^3.1.11", + "@smithy/smithy-client": "^3.2.0", "@smithy/types": "^3.3.0", "bowser": "^2.11.0", "tslib": "^2.6.2" @@ -2753,15 +2756,15 @@ } }, "node_modules/@smithy/util-defaults-mode-node": { - "version": "3.0.13", - "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-3.0.13.tgz", - "integrity": "sha512-voUa8TFJGfD+U12tlNNLCDlXibt9vRdNzRX45Onk/WxZe7TS+hTOZouEZRa7oARGicdgeXvt1A0W45qLGYdy+g==", + "version": "3.0.15", + "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-3.0.15.tgz", + "integrity": "sha512-KSyAAx2q6d0t6f/S4XB2+3+6aQacm3aLMhs9aLMqn18uYGUepbdssfogW5JQZpc6lXNBnp0tEnR5e9CEKmEd7A==", "dependencies": { "@smithy/config-resolver": "^3.0.5", "@smithy/credential-provider-imds": "^3.2.0", "@smithy/node-config-provider": "^3.1.4", "@smithy/property-provider": "^3.1.3", - "@smithy/smithy-client": "^3.1.11", + "@smithy/smithy-client": "^3.2.0", "@smithy/types": "^3.3.0", "tslib": "^2.6.2" }, @@ -2980,6 +2983,12 @@ "pretty-format": "^29.0.0" } }, + "node_modules/@types/mocha": { + "version": "10.0.7", + "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-10.0.7.tgz", + "integrity": "sha512-GN8yJ1mNTcFcah/wKEFIJckJx9iJLoMSzWcfRRuxz/Jk+U6KQNnml+etbtxFK8lPjzOw3zp4Ha/kjSst9fsHYw==", + "dev": true + }, "node_modules/@types/node": { "version": "20.14.9", "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.9.tgz", diff --git a/typescript/package.json b/typescript/package.json index e7e209a..2df503e 100644 --- a/typescript/package.json +++ b/typescript/package.json @@ -44,6 +44,7 @@ }, "devDependencies": { "@types/jest": "^29.5.12", + "@types/mocha": "^10.0.7", "@typescript-eslint/eslint-plugin": "^7.17.0", "@typescript-eslint/parser": "^7.17.0", "aws-sdk-client-mock": "^4.0.1", diff --git a/typescript/src/agents/bedrockTranslatorAgent.ts b/typescript/src/agents/bedrockTranslatorAgent.ts new file mode 100644 index 0000000..c3f9f4e --- /dev/null +++ b/typescript/src/agents/bedrockTranslatorAgent.ts @@ -0,0 +1,168 @@ +import { Agent, AgentOptions } from "./agent"; +import { 
ConversationMessage, ParticipantRole, BEDROCK_MODEL_ID_CLAUDE_3_HAIKU } from "../types"; +import { BedrockRuntimeClient, ConverseCommand, ContentBlock } from "@aws-sdk/client-bedrock-runtime"; +import { Logger } from "../utils/logger"; + +interface BedrockTranslatorAgentOptions extends AgentOptions { + sourceLanguage?: string; + targetLanguage?: string; + modelId?: string; + inferenceConfig?: { + maxTokens?: number; + temperature?: number; + topP?: number; + stopSequences?: string[]; + }; +} + +interface ToolInput { + translation: string; +} + +function isToolInput(input: unknown): input is ToolInput { + return ( + typeof input === 'object' && + input !== null && + 'translation' in input + ); +} + +export class BedrockTranslatorAgent extends Agent { + private sourceLanguage?: string; + private targetLanguage: string; + private modelId: string; + private client: BedrockRuntimeClient; + private inferenceConfig: { + maxTokens?: number; + temperature?: number; + topP?: number; + stopSequences?: string[]; + }; + + private tools = [ + { + toolSpec: { + name: "Translate", + description: "Translate text to target language", + inputSchema: { + json: { + type: "object", + properties: { + translation: { + type: "string", + description: "The translated text", + }, + }, + required: ["translation"], + }, + }, + }, + }, + ]; + + constructor(options: BedrockTranslatorAgentOptions) { + super(options); + this.sourceLanguage = options.sourceLanguage; + this.targetLanguage = options.targetLanguage || 'English'; + this.modelId = options.modelId || BEDROCK_MODEL_ID_CLAUDE_3_HAIKU; + this.client = new BedrockRuntimeClient({ region: options.region }); + this.inferenceConfig = options.inferenceConfig || {}; + } + + async processRequest( + inputText: string, + userId: string, + sessionId: string, + chatHistory: ConversationMessage[], + additionalParams?: Record + ): Promise { + // Check if input is a number + if (!isNaN(Number(inputText))) { + return { + role: ParticipantRole.ASSISTANT, + content: [{ text: inputText }], + }; + } + + const userMessage: ConversationMessage = { + role: ParticipantRole.USER, + content: [{ text: `${inputText}` }], + }; + + let systemPrompt = `You are a translator. Translate the text within the tags`; + if (this.sourceLanguage) { + systemPrompt += ` from ${this.sourceLanguage} to ${this.targetLanguage}`; + } else { + systemPrompt += ` to ${this.targetLanguage}`; + } + systemPrompt += `. 
Only provide the translation using the Translate tool.`; + + const converseCmd = { + modelId: this.modelId, + messages: [userMessage], + system: [{ text: systemPrompt }], + toolConfig: { + tools: this.tools, + toolChoice: { + tool: { + name: "Translate", + }, + }, + }, + inferenceConfiguration: { + maximumTokens: this.inferenceConfig.maxTokens, + temperature: this.inferenceConfig.temperature, + topP: this.inferenceConfig.topP, + stopSequences: this.inferenceConfig.stopSequences, + }, + }; + + try { + const command = new ConverseCommand(converseCmd); + const response = await this.client.send(command); + + if (!response.output) { + throw new Error("No output received from Bedrock model"); + } + if (response.output.message.content) { + const responseContentBlocks = response.output.message + .content as ContentBlock[]; + + for (const contentBlock of responseContentBlocks) { + if ("toolUse" in contentBlock) { + const toolUse = contentBlock.toolUse; + if (!toolUse) { + throw new Error("No tool use found in the response"); + } + + if (!isToolInput(toolUse.input)) { + throw new Error("Tool input does not match expected structure"); + } + + if (typeof toolUse.input.translation !== 'string') { + throw new Error("Translation is not a string"); + } + + return { + role: ParticipantRole.ASSISTANT, + content: [{ text: toolUse.input.translation }], + }; + } + } + } + + throw new Error("No valid tool use found in the response"); + } catch (error) { + Logger.logger.error("Error processing translation request:", error); + throw error; + } + } + + setSourceLanguage(language: string | undefined): void { + this.sourceLanguage = language; + } + + setTargetLanguage(language: string): void { + this.targetLanguage = language; + } +} \ No newline at end of file diff --git a/typescript/src/agents/chainAgent.ts b/typescript/src/agents/chainAgent.ts new file mode 100644 index 0000000..ada831b --- /dev/null +++ b/typescript/src/agents/chainAgent.ts @@ -0,0 +1,101 @@ +import { Agent, AgentOptions } from "./agent"; +import { ConversationMessage, ParticipantRole } from "../types"; +import { Logger } from "../utils/logger"; + +export interface ChainAgentOptions extends AgentOptions { + agents: Agent[]; + defaultOutput?: string; +} + +export class ChainAgent extends Agent { + agents: Agent[]; + private defaultOutput: string; + + constructor(options: ChainAgentOptions) { + super(options); + this.agents = options.agents; + this.defaultOutput = options.defaultOutput || "No output generated from the chain."; + + if (this.agents.length === 0) { + throw new Error("ChainAgent requires at least one agent in the chain."); + } + } + + async processRequest( + inputText: string, + userId: string, + sessionId: string, + chatHistory: ConversationMessage[], + additionalParams?: Record + ): Promise> { + + let currentInput = inputText; + let finalResponse: ConversationMessage | AsyncIterable; + + console.log(`Processing chain with ${this.agents.length} agents`); + + for (let i = 0; i < this.agents.length; i++) { + const isLastAgent = i === this.agents.length - 1; + const agent = this.agents[i]; + + try { + console.log(`Input for agent ${i}: ${currentInput}`); + const response = await agent.processRequest( + currentInput, + userId, + sessionId, + chatHistory, + additionalParams + ); + + if (this.isConversationMessage(response)) { + if (response.content.length > 0 && 'text' in response.content[0]) { + currentInput = response.content[0].text; + finalResponse = response; + console.log(`Output from agent ${i}: ${currentInput}`); + } else { + 
Logger.logger.warn(`Agent ${agent.name} returned no text content.`); + return this.createDefaultResponse(); + } + } else if (this.isAsyncIterable(response)) { + if (!isLastAgent) { + Logger.logger.warn(`Intermediate agent ${agent.name} returned a streaming response, which is not allowed.`); + return this.createDefaultResponse(); + } + // It's the last agent and streaming is allowed + finalResponse = response; + } else { + Logger.logger.warn(`Agent ${agent.name} returned an invalid response type.`); + return this.createDefaultResponse(); + } + + // If it's not the last agent, ensure we have a non-streaming response to pass to the next agent + if (!isLastAgent && !this.isConversationMessage(finalResponse)) { + Logger.logger.error(`Expected non-streaming response from intermediate agent ${agent.name}`); + return this.createDefaultResponse(); + } + } catch (error) { + Logger.logger.error(`Error processing request with agent ${agent.name}:`, error); + return this.createDefaultResponse(); + } + } + + return finalResponse; + } + + private isAsyncIterable(obj: any): obj is AsyncIterable { + return obj && typeof obj[Symbol.asyncIterator] === 'function'; + } + + + private isConversationMessage(response: any): response is ConversationMessage { + return response && 'role' in response && 'content' in response && Array.isArray(response.content); + } + + private createDefaultResponse(): ConversationMessage { + return { + role: ParticipantRole.ASSISTANT, + content: [{ text: this.defaultOutput }], + }; + } +} \ No newline at end of file diff --git a/typescript/src/agents/comprehendFilterAgent.ts b/typescript/src/agents/comprehendFilterAgent.ts new file mode 100644 index 0000000..8f4d4a0 --- /dev/null +++ b/typescript/src/agents/comprehendFilterAgent.ts @@ -0,0 +1,286 @@ +import { Agent, AgentOptions } from "./agent"; +import { ConversationMessage, ParticipantRole } from "../types"; +import { Logger } from "../utils/logger"; +import { + ComprehendClient, + DetectSentimentCommand, + DetectPiiEntitiesCommand, + DetectToxicContentCommand, + DetectSentimentCommandOutput, + DetectPiiEntitiesCommandOutput, + DetectToxicContentCommandOutput, + LanguageCode +} from "@aws-sdk/client-comprehend"; + +// Interface for toxic content labels returned by Comprehend +interface ToxicContent { + Name: "GRAPHIC" | "HARASSMENT_OR_ABUSE" | "HATE_SPEECH" | "INSULT" | "PROFANITY" | "SEXUAL" | "VIOLENCE_OR_THREAT"; + Score: number; +} + +// Interface for toxic labels result structure +interface ToxicLabels { + Labels: ToxicContent[]; + Toxicity: number; +} + +// Type definition for custom check functions +type CheckFunction = (input: string) => Promise; + +// Extended options for ComprehendContentFilterAgent +export interface ComprehendFilterAgentOptions extends AgentOptions { + enableSentimentCheck?: boolean; + enablePiiCheck?: boolean; + enableToxicityCheck?: boolean; + sentimentThreshold?: number; + toxicityThreshold?: number; + allowPii?: boolean; + languageCode?: LanguageCode; +} + +/** + * ComprehendContentFilterAgent class + * + * This agent uses Amazon Comprehend to analyze and filter content based on + * sentiment, PII, and toxicity. It can be configured to enable/disable specific + * checks and allows for the addition of custom checks. 
+ */ +export class ComprehendFilterAgent extends Agent { + private comprehendClient: ComprehendClient; + private customChecks: CheckFunction[] = []; + + private enableSentimentCheck: boolean; + private enablePiiCheck: boolean; + private enableToxicityCheck: boolean; + private sentimentThreshold: number; + private toxicityThreshold: number; + private allowPii: boolean; + private languageCode: LanguageCode; + + /** + * Constructor for ComprehendContentFilterAgent + * @param options - Configuration options for the agent + */ + constructor(options: ComprehendFilterAgentOptions) { + super(options); + + this.comprehendClient = options.region + ? new ComprehendClient({ region: options.region }) + : new ComprehendClient(); + + // Set default configuration using fields from options + this.enableSentimentCheck = options.enableSentimentCheck ?? true; + this.enablePiiCheck = options.enablePiiCheck ?? true; + this.enableToxicityCheck = options.enableToxicityCheck ?? true; + this.sentimentThreshold = options.sentimentThreshold ?? 0.7; + this.toxicityThreshold = options.toxicityThreshold ?? 0.7; + this.allowPii = options.allowPii ?? false; + this.languageCode = this.validateLanguageCode(options.languageCode) ?? 'en'; + + // Ensure at least one check is enabled + if (!this.enableSentimentCheck && + !this.enablePiiCheck && + !this.enableToxicityCheck) { + this.enableToxicityCheck = true; + } + } + + /** + * Process a request through the content filter + * + * @param inputText - The text to be analyzed + * @param userId - User identifier + * @param sessionId - Session identifier + * @param chatHistory - Array of previous conversation messages + * @param additionalParams - Optional additional parameters + * @returns A promise resolving to a ConversationMessage if content is safe, or null if flagged + */ + async processRequest( + inputText: string, + userId: string, + sessionId: string, + chatHistory: ConversationMessage[], + additionalParams?: Record + ): Promise { + try { + const issues: string[] = []; + + // Run all checks in parallel + const [sentimentResult, piiResult, toxicityResult] = await Promise.all([ + this.enableSentimentCheck ? this.detectSentiment(inputText) : null, + this.enablePiiCheck ? this.detectPiiEntities(inputText) : null, + this.enableToxicityCheck ? 
this.detectToxicContent(inputText) : null + ]); + + // Process results + if (this.enableSentimentCheck && sentimentResult) { + const sentimentIssue = this.checkSentiment(sentimentResult); + if (sentimentIssue) issues.push(sentimentIssue); + } + + if (this.enablePiiCheck && piiResult) { + const piiIssue = this.checkPii(piiResult); + if (piiIssue) issues.push(piiIssue); + } + + if (this.enableToxicityCheck && toxicityResult) { + const toxicityIssue = this.checkToxicity(toxicityResult); + if (toxicityIssue) issues.push(toxicityIssue); + } + + // Run custom checks + for (const check of this.customChecks) { + const customIssue = await check(inputText); + if (customIssue) issues.push(customIssue); + } + + if (issues.length > 0) { + Logger.logger.warn(`Content filter issues detected: ${issues.join('; ')}`); + return null; // Return null to indicate content should not be processed further + } + + // If no issues, return the original input as a ConversationMessage + return { + role: ParticipantRole.ASSISTANT, + content: [{ text: inputText }] + }; + + } catch (error) { + Logger.logger.error("Error in ComprehendContentFilterAgent:", error); + throw error; + } + } + + /** + * Add a custom check function to the agent + * @param check - A function that takes a string input and returns a Promise + */ + addCustomCheck(check: CheckFunction) { + this.customChecks.push(check); + } + + /** + * Check sentiment of the input text + * @param result - Result from Comprehend's sentiment detection + * @returns A string describing the issue if sentiment is negative, null otherwise + */ + private checkSentiment(result: DetectSentimentCommandOutput): string | null { + if (result.Sentiment === 'NEGATIVE' && + result.SentimentScore?.Negative > this.sentimentThreshold) { + return `Negative sentiment detected (${result.SentimentScore.Negative.toFixed(2)})`; + } + return null; + } + + /** + * Check for PII in the input text + * @param result - Result from Comprehend's PII detection + * @returns A string describing the issue if PII is detected, null otherwise + */ + private checkPii(result: DetectPiiEntitiesCommandOutput): string | null { + if (!this.allowPii && result.Entities && result.Entities.length > 0) { + return `PII detected: ${result.Entities.map(e => e.Type).join(', ')}`; + } + return null; + } + + /** + * Check for toxic content in the input text + * @param result - Result from Comprehend's toxic content detection + * @returns A string describing the issue if toxic content is detected, null otherwise + */ + private checkToxicity(result: DetectToxicContentCommandOutput): string | null { + const toxicLabels = this.getToxicLabels(result); + if (toxicLabels.length > 0) { + return `Toxic content detected: ${toxicLabels.join(', ')}`; + } + return null; + } + + /** + * Detect sentiment using Amazon Comprehend + * @param text - Input text to analyze + */ + private async detectSentiment(text: string) { + const command = new DetectSentimentCommand({ + Text: text, + LanguageCode: this.languageCode + }); + return this.comprehendClient.send(command); + } + + /** + * Detect PII entities using Amazon Comprehend + * @param text - Input text to analyze + */ + private async detectPiiEntities(text: string) { + const command = new DetectPiiEntitiesCommand({ + Text: text, + LanguageCode: this.languageCode + }); + return this.comprehendClient.send(command); + } + + /** + * Detect toxic content using Amazon Comprehend + * @param text - Input text to analyze + */ + private async detectToxicContent(text: string) { + const command = new 
DetectToxicContentCommand({ + TextSegments: [{ Text: text }], + LanguageCode: this.languageCode + }); + return this.comprehendClient.send(command); + } + + /** + * Extract toxic labels from the Comprehend response + * @param toxicityResult - Result from Comprehend's toxic content detection + * @returns Array of toxic label names that exceed the threshold + */ + private getToxicLabels(toxicityResult: DetectToxicContentCommandOutput): string[] { + const toxicLabels: string[] = []; + + if (toxicityResult.ResultList && Array.isArray(toxicityResult.ResultList)) { + toxicityResult.ResultList.forEach((result: ToxicLabels) => { + if (result.Labels && Array.isArray(result.Labels)) { + result.Labels.forEach((label: ToxicContent) => { + if (label.Score > this.toxicityThreshold) { + toxicLabels.push(label.Name); + } + }); + } + }); + } + + return toxicLabels; + } + + /** + * Set the language code for Comprehend operations + * @param languageCode - The ISO 639-1 language code + */ + setLanguageCode(languageCode: LanguageCode): void { + const validatedLanguageCode = this.validateLanguageCode(languageCode); + if (validatedLanguageCode) { + this.languageCode = validatedLanguageCode; + } else { + throw new Error(`Invalid language code: ${languageCode}`); + } + } + + /** + * Validate the provided language code + * @param languageCode - The language code to validate + * @returns The validated LanguageCode or undefined if invalid + */ + private validateLanguageCode(languageCode: LanguageCode | undefined): LanguageCode | undefined { + if (!languageCode) return undefined; + + const validLanguageCodes: LanguageCode[] = [ + 'en', 'es', 'fr', 'de', 'it', 'pt', 'ar', 'hi', 'ja', 'ko', 'zh', 'zh-TW' + ]; + + return validLanguageCodes.includes(languageCode) ? languageCode : undefined; + } +} \ No newline at end of file diff --git a/typescript/src/classifiers/anthropicClassifier.ts b/typescript/src/classifiers/anthropicClassifier.ts index d07bed6..ad1b810 100644 --- a/typescript/src/classifiers/anthropicClassifier.ts +++ b/typescript/src/classifiers/anthropicClassifier.ts @@ -3,7 +3,7 @@ import { ConversationMessage, ParticipantRole, } from "../types"; -import { isToolInput } from "../utils/helpers"; +import { isClassifierToolInput } from "../utils/helpers"; import { Logger } from "../utils/logger"; import { Classifier, ClassifierResult } from "./classifier"; import { Anthropic } from "@anthropic-ai/sdk"; @@ -117,7 +117,7 @@ async processRequest( throw new Error("No tool use found in the response"); } - if (!isToolInput(toolUse.input)) { + if (!isClassifierToolInput(toolUse.input)) { throw new Error("Tool input does not match expected structure"); } diff --git a/typescript/src/classifiers/bedrockClassifier.ts b/typescript/src/classifiers/bedrockClassifier.ts index 1fb233b..342b5c1 100644 --- a/typescript/src/classifiers/bedrockClassifier.ts +++ b/typescript/src/classifiers/bedrockClassifier.ts @@ -10,7 +10,7 @@ import { } from "@aws-sdk/client-bedrock-runtime"; import { Classifier, ClassifierResult } from "./classifier"; -import { isToolInput } from "../utils/helpers"; +import { isClassifierToolInput } from "../utils/helpers"; import { Logger } from "../utils/logger"; @@ -164,7 +164,7 @@ export class BedrockClassifier extends Classifier{ throw new Error("No tool use found in the response"); } - if (!isToolInput(toolUse.input)) { + if (!isClassifierToolInput(toolUse.input)) { throw new Error("Tool input does not match expected structure"); } diff --git a/typescript/src/utils/helpers.ts b/typescript/src/utils/helpers.ts 
index 006d337..b30fbad 100644 --- a/typescript/src/utils/helpers.ts +++ b/typescript/src/utils/helpers.ts @@ -43,10 +43,11 @@ export class AccumulatorTransform extends Transform { } - export function isToolInput(input: unknown): input is ToolInput { + export function isClassifierToolInput(input: unknown): input is ToolInput { return ( typeof input === 'object' && input !== null && + 'userinput' in input && 'selected_agent' in input && 'confidence' in input ); From 81ca3babee73e7d5af7d0b794a4b7be06aed370b Mon Sep 17 00:00:00 2001 From: Corneliu CROITORU Date: Tue, 27 Aug 2024 18:05:09 +0200 Subject: [PATCH 2/3] python version working --- .../agents/__init__.py | 9 + .../agents/bedrock_llm_agent.py | 1 + .../agents/bedrock_translator_agent.py | 135 +++++++++++++++ .../agents/chain_agent.py | 89 ++++++++++ .../agents/comprehend_filter_agent.py | 159 ++++++++++++++++++ 5 files changed, 393 insertions(+) create mode 100644 python/src/multi_agent_orchestrator/agents/bedrock_translator_agent.py create mode 100644 python/src/multi_agent_orchestrator/agents/chain_agent.py create mode 100644 python/src/multi_agent_orchestrator/agents/comprehend_filter_agent.py diff --git a/python/src/multi_agent_orchestrator/agents/__init__.py b/python/src/multi_agent_orchestrator/agents/__init__.py index e1000cc..3eebbaf 100644 --- a/python/src/multi_agent_orchestrator/agents/__init__.py +++ b/python/src/multi_agent_orchestrator/agents/__init__.py @@ -6,6 +6,9 @@ from .bedrock_llm_agent import BedrockLLMAgent, BedrockLLMAgentOptions from .lex_bot_agent import LexBotAgent, LexBotAgentOptions from .amazon_bedrock_agent import AmazonBedrockAgent, AmazonBedrockAgentOptions +from .comprehend_filter_agent import ComprehendFilterAgent, ComprehendFilterAgentOptions +from .chain_agent import ChainAgent, ChainAgentOptions +from .bedrock_translator_agent import BedrockTranslatorAgent, BedrockTranslatorAgentOptions __all__ = [ @@ -22,4 +25,10 @@ 'LexBotAgentOptions', 'AmazonBedrockAgent', 'AmazonBedrockAgentOptions' + 'ComprehendFilterAgent', + 'ComprehendFilterAgentOptions', + 'BedrockTranslatorAgent', + 'BedrockTranslatorAgentOptions', + 'ChainAgent', + 'ChainAgentOptions' ] diff --git a/python/src/multi_agent_orchestrator/agents/bedrock_llm_agent.py b/python/src/multi_agent_orchestrator/agents/bedrock_llm_agent.py index 55cfcc0..1a890e9 100644 --- a/python/src/multi_agent_orchestrator/agents/bedrock_llm_agent.py +++ b/python/src/multi_agent_orchestrator/agents/bedrock_llm_agent.py @@ -81,6 +81,7 @@ async def process_request( chat_history: List[ConversationMessage], additional_params: Optional[Dict[str, str]] = None ) -> Union[ConversationMessage, AsyncIterable[Any]]: + user_message =ConversationMessage( role=ParticipantRole.USER.value, content=[{'text': input_text}] diff --git a/python/src/multi_agent_orchestrator/agents/bedrock_translator_agent.py b/python/src/multi_agent_orchestrator/agents/bedrock_translator_agent.py new file mode 100644 index 0000000..a0dcc36 --- /dev/null +++ b/python/src/multi_agent_orchestrator/agents/bedrock_translator_agent.py @@ -0,0 +1,135 @@ +from typing import List, Dict, Optional, Any +from multi_agent_orchestrator.types import ConversationMessage, ParticipantRole, BEDROCK_MODEL_ID_CLAUDE_3_HAIKU +from multi_agent_orchestrator.utils import conversation_to_dict, Logger +from dataclasses import dataclass +from .agent import Agent, AgentOptions +import boto3 + +@dataclass +class BedrockTranslatorAgentOptions(AgentOptions): + source_language: Optional[str] = None + target_language: Optional[str] = 
None + inference_config: Optional[Dict[str, Any]] = None + model_id: Optional[str] = None + region: Optional[str] = None + +class BedrockTranslatorAgent(Agent): + def __init__(self, options: BedrockTranslatorAgentOptions): + super().__init__(options) + self.source_language = options.source_language + self.target_language = options.target_language or 'English' + self.model_id = options.model_id or BEDROCK_MODEL_ID_CLAUDE_3_HAIKU + self.client = boto3.client('bedrock-runtime', region_name=options.region) + + # Default inference configuration + self.inference_config: Dict[str, Any] = options.inference_config or { + 'maxTokens': 1000, + 'temperature': 0.0, + 'topP': 0.9, + 'stopSequences': [] + } + + # Define the translation tool + self.tools = [{ + "toolSpec": { + "name": "Translate", + "description": "Translate text to target language", + "inputSchema": { + "json": { + "type": "object", + "properties": { + "translation": { + "type": "string", + "description": "The translated text", + }, + }, + "required": ["translation"], + }, + }, + }, + }] + + async def process_request(self, + input_text: str, + user_id: str, + session_id: str, + chat_history: List[ConversationMessage], + additional_params: Optional[Dict[str, str]] = None) -> ConversationMessage: + # Check if input is a number and return it as-is if true + if input_text.isdigit(): + return ConversationMessage( + role=ParticipantRole.ASSISTANT, + content=[{"text": input_text}] + ) + + # Prepare user message + user_message = ConversationMessage( + role=ParticipantRole.USER, + content=[{"text": f"{input_text}"}] + ) + + # Construct system prompt + system_prompt = "You are a translator. Translate the text within the tags" + if self.source_language: + system_prompt += f" from {self.source_language} to {self.target_language}" + else: + system_prompt += f" to {self.target_language}" + system_prompt += ". Only provide the translation using the Translate tool." 
+ + # Prepare the converse command for Bedrock + converse_cmd = { + "modelId": self.model_id, + "messages": [conversation_to_dict(user_message)], + "system": [{"text": system_prompt}], + "toolConfig": { + "tools": self.tools, + "toolChoice": { + "tool": { + "name": "Translate", + }, + }, + }, + 'inferenceConfig': self.inference_config + } + + try: + # Send request to Bedrock + response = self.client.converse(**converse_cmd) + + if 'output' not in response: + raise ValueError("No output received from Bedrock model") + + if response['output'].get('message', {}).get('content'): + response_content_blocks = response['output']['message']['content'] + + for content_block in response_content_blocks: + if "toolUse" in content_block: + tool_use = content_block["toolUse"] + if not tool_use: + raise ValueError("No tool use found in the response") + + if not isinstance(tool_use.get('input'), dict) or 'translation' not in tool_use['input']: + raise ValueError("Tool input does not match expected structure") + + translation = tool_use['input']['translation'] + if not isinstance(translation, str): + raise ValueError("Translation is not a string") + + # Return the translated text + return ConversationMessage( + role=ParticipantRole.ASSISTANT, + content=[{"text": translation}] + ) + + raise ValueError("No valid tool use found in the response") + except Exception as error: + Logger.error("Error processing translation request:", error) + raise + + def set_source_language(self, language: Optional[str]): + """Set the source language for translation""" + self.source_language = language + + def set_target_language(self, language: str): + """Set the target language for translation""" + self.target_language = language \ No newline at end of file diff --git a/python/src/multi_agent_orchestrator/agents/chain_agent.py b/python/src/multi_agent_orchestrator/agents/chain_agent.py new file mode 100644 index 0000000..269b833 --- /dev/null +++ b/python/src/multi_agent_orchestrator/agents/chain_agent.py @@ -0,0 +1,89 @@ +from typing import List, Dict, Union, AsyncIterable, Optional +from multi_agent_orchestrator.types import ConversationMessage, ParticipantRole +from multi_agent_orchestrator.utils.logger import Logger +from .agent import Agent, AgentOptions + +class ChainAgentOptions(AgentOptions): + def __init__(self, agents: List[Agent], default_output: Optional[str] = None, **kwargs): + super().__init__(**kwargs) + self.agents = agents + self.default_output = default_output + +class ChainAgent(Agent): + def __init__(self, options: ChainAgentOptions): + super().__init__(options) + self.agents = options.agents + self.default_output = options.default_output or "No output generated from the chain." 
+ if len(self.agents) == 0: + raise ValueError("ChainAgent requires at least one agent in the chain.") + + async def process_request( + self, + input_text: str, + user_id: str, + session_id: str, + chat_history: List[ConversationMessage], + additional_params: Optional[Dict[str, str]] = None + ) -> Union[ConversationMessage, AsyncIterable[any]]: + current_input = input_text + final_response: Union[ConversationMessage, AsyncIterable[any]] + print(f"Processing chain with {len(self.agents)} agents") + + for i, agent in enumerate(self.agents): + is_last_agent = i == len(self.agents) - 1 + try: + print(f"Input for agent {i}: {current_input}") + response = await agent.process_request( + current_input, + user_id, + session_id, + chat_history, + additional_params + ) + if self.is_conversation_message(response): + if response.content and 'text' in response.content[0]: + current_input = response.content[0]['text'] + final_response = response + print(f"Output from agent {i}: {current_input}") + else: + Logger.logger.warning(f"Agent {agent.name} returned no text content.") + return self.create_default_response() + elif self.is_async_iterable(response): + if not is_last_agent: + Logger.logger.warning(f"Intermediate agent {agent.name} returned a streaming response, which is not allowed.") + return self.create_default_response() + # It's the last agent and streaming is allowed + final_response = response + else: + Logger.logger.warning(f"Agent {agent.name} returned an invalid response type.") + return self.create_default_response() + + # If it's not the last agent, ensure we have a non-streaming response to pass to the next agent + if not is_last_agent and not self.is_conversation_message(final_response): + Logger.logger.error(f"Expected non-streaming response from intermediate agent {agent.name}") + return self.create_default_response() + + except Exception as error: + Logger.logger.error(f"Error processing request with agent {agent.name}:", error) + return self.create_default_response() + + return final_response + + @staticmethod + def is_async_iterable(obj: any) -> bool: + return hasattr(obj, '__aiter__') + + @staticmethod + def is_conversation_message(response: any) -> bool: + return ( + isinstance(response, ConversationMessage) and + hasattr(response, 'role') and + hasattr(response, 'content') and + isinstance(response.content, list) + ) + + def create_default_response(self) -> ConversationMessage: + return ConversationMessage( + role=ParticipantRole.ASSISTANT, + content=[{"text": self.default_output}] + ) \ No newline at end of file diff --git a/python/src/multi_agent_orchestrator/agents/comprehend_filter_agent.py b/python/src/multi_agent_orchestrator/agents/comprehend_filter_agent.py new file mode 100644 index 0000000..04eb25a --- /dev/null +++ b/python/src/multi_agent_orchestrator/agents/comprehend_filter_agent.py @@ -0,0 +1,159 @@ +from typing import List, Dict, Union, Optional, Callable, Any +from multi_agent_orchestrator.types import ConversationMessage, ParticipantRole +from multi_agent_orchestrator.utils.logger import Logger +from .agent import Agent, AgentOptions +import boto3 +from botocore.config import Config + +# Type alias for CheckFunction +CheckFunction = Callable[[str], str] + +class ComprehendFilterAgentOptions(AgentOptions): + def __init__(self, + enable_sentiment_check: bool = True, + enable_pii_check: bool = True, + enable_toxicity_check: bool = True, + sentiment_threshold: float = 0.7, + toxicity_threshold: float = 0.7, + allow_pii: bool = False, + language_code: str = 'en', + 
**kwargs): + super().__init__(**kwargs) + self.enable_sentiment_check = enable_sentiment_check + self.enable_pii_check = enable_pii_check + self.enable_toxicity_check = enable_toxicity_check + self.sentiment_threshold = sentiment_threshold + self.toxicity_threshold = toxicity_threshold + self.allow_pii = allow_pii + self.language_code = language_code + +class ComprehendFilterAgent(Agent): + def __init__(self, options: ComprehendFilterAgentOptions): + super().__init__(options) + + config = Config(region_name=options.region) if options.region else None + self.comprehend_client = boto3.client('comprehend', config=config) + + self.custom_checks: List[CheckFunction] = [] + + self.enable_sentiment_check = options.enable_sentiment_check + self.enable_pii_check = options.enable_pii_check + self.enable_toxicity_check = options.enable_toxicity_check + self.sentiment_threshold = options.sentiment_threshold + self.toxicity_threshold = options.toxicity_threshold + self.allow_pii = options.allow_pii + self.language_code = self.validate_language_code(options.language_code) or 'en' + + # Ensure at least one check is enabled + if not any([self.enable_sentiment_check, self.enable_pii_check, self.enable_toxicity_check]): + self.enable_toxicity_check = True + + async def process_request(self, + input_text: str, + user_id: str, + session_id: str, + chat_history: List[ConversationMessage], + additional_params: Optional[Dict[str, str]] = None) -> Optional[ConversationMessage]: + try: + issues: List[str] = [] + + # Run all checks + sentiment_result = self.detect_sentiment(input_text) if self.enable_sentiment_check else None + pii_result = self.detect_pii_entities(input_text) if self.enable_pii_check else None + toxicity_result = self.detect_toxic_content(input_text) if self.enable_toxicity_check else None + + # Process results + if self.enable_sentiment_check and sentiment_result: + sentiment_issue = self.check_sentiment(sentiment_result) + if sentiment_issue: + issues.append(sentiment_issue) + + if self.enable_pii_check and pii_result: + pii_issue = self.check_pii(pii_result) + if pii_issue: + issues.append(pii_issue) + + if self.enable_toxicity_check and toxicity_result: + toxicity_issue = self.check_toxicity(toxicity_result) + if toxicity_issue: + issues.append(toxicity_issue) + + # Run custom checks + for check in self.custom_checks: + custom_issue = await check(input_text) + if custom_issue: + issues.append(custom_issue) + + if issues: + Logger.logger.warning(f"Content filter issues detected: {'; '.join(issues)}") + return None # Return None to indicate content should not be processed further + + # If no issues, return the original input as a ConversationMessage + return ConversationMessage( + role=ParticipantRole.ASSISTANT, + content=[{"text": input_text}] + ) + + except Exception as error: + Logger.logger.error("Error in ComprehendContentFilterAgent:", error) + raise + + def add_custom_check(self, check: CheckFunction): + self.custom_checks.append(check) + + def check_sentiment(self, result: Dict[str, Any]) -> Optional[str]: + if result['Sentiment'] == 'NEGATIVE' and result['SentimentScore']['Negative'] > self.sentiment_threshold: + return f"Negative sentiment detected ({result['SentimentScore']['Negative']:.2f})" + return None + + def check_pii(self, result: Dict[str, Any]) -> Optional[str]: + if not self.allow_pii and result.get('Entities'): + return f"PII detected: {', '.join(e['Type'] for e in result['Entities'])}" + return None + + def check_toxicity(self, result: Dict[str, Any]) -> Optional[str]: + 
toxic_labels = self.get_toxic_labels(result) + if toxic_labels: + return f"Toxic content detected: {', '.join(toxic_labels)}" + return None + + def detect_sentiment(self, text: str) -> Dict[str, Any]: + return self.comprehend_client.detect_sentiment( + Text=text, + LanguageCode=self.language_code + ) + + def detect_pii_entities(self, text: str) -> Dict[str, Any]: + return self.comprehend_client.detect_pii_entities( + Text=text, + LanguageCode=self.language_code + ) + + def detect_toxic_content(self, text: str) -> Dict[str, Any]: + return self.comprehend_client.detect_toxic_content( + TextSegments=[{"Text": text}], + LanguageCode=self.language_code + ) + + def get_toxic_labels(self, toxicity_result: Dict[str, Any]) -> List[str]: + toxic_labels = [] + for result in toxicity_result.get('ResultList', []): + for label in result.get('Labels', []): + if label['Score'] > self.toxicity_threshold: + toxic_labels.append(label['Name']) + return toxic_labels + + def set_language_code(self, language_code: str): + validated_language_code = self.validate_language_code(language_code) + if validated_language_code: + self.language_code = validated_language_code + else: + raise ValueError(f"Invalid language code: {language_code}") + + @staticmethod + def validate_language_code(language_code: Optional[str]) -> Optional[str]: + if not language_code: + return None + + valid_language_codes = ['en', 'es', 'fr', 'de', 'it', 'pt', 'ar', 'hi', 'ja', 'ko', 'zh', 'zh-TW'] + return language_code if language_code in valid_language_codes else None \ No newline at end of file From c41e3d683969183cca8762bbec37e53136a5d2f0 Mon Sep 17 00:00:00 2001 From: Corneliu CROITORU Date: Wed, 28 Aug 2024 18:18:17 +0200 Subject: [PATCH 3/3] update doc --- docs/astro.config.mjs | 5 +- .../agents/built-in/bedrock-llm-agent.mdx | 15 +- .../built-in/bedrock-translator-agent.mdx | 276 ++++++++++++ .../docs/agents/built-in/chain-agent.mdx | 208 +++++++++ .../built-in/comprehend-filter-agent.mdx | 421 ++++++++++++++++++ .../agents/chain_agent.py | 5 +- .../classifiers/anthropic_classifier.py | 3 +- .../multi_agent_orchestrator/orchestrator.py | 2 +- 8 files changed, 922 insertions(+), 13 deletions(-) create mode 100644 docs/src/content/docs/agents/built-in/bedrock-translator-agent.mdx create mode 100644 docs/src/content/docs/agents/built-in/chain-agent.mdx create mode 100644 docs/src/content/docs/agents/built-in/comprehend-filter-agent.mdx diff --git a/docs/astro.config.mjs b/docs/astro.config.mjs index ee9d77d..4731a5c 100644 --- a/docs/astro.config.mjs +++ b/docs/astro.config.mjs @@ -63,7 +63,10 @@ export default defineConfig({ { label: 'Amazon Bedrock Agent', link: '/agents/built-in/amazon-bedrock-agent' }, { label: 'Amazon Lex Bot Agent', link: '/agents/built-in/lex-bot-agent' }, { label: 'AWS Lambda Agent', link: '/agents/built-in/lambda-agent' }, - { label: 'OpenAI Agent', link: '/agents/built-in/openai-agent' } + { label: 'OpenAI Agent', link: '/agents/built-in/openai-agent' }, + { label: 'Chain Agent', link: '/agents/built-in/chain-agent' }, + { label: 'Comprehend Filter Agent', link: '/agents/built-in/comprehend-filter-agent' }, + { label: 'Amazon Bedrock Translator Agent', link: '/agents/built-in/bedrock-translator-agent' } ] }, { label: 'Custom Agents', link: '/agents/custom-agents' }, diff --git a/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx b/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx index 41e7d90..ae750db 100644 --- a/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx +++ 
b/docs/src/content/docs/agents/built-in/bedrock-llm-agent.mdx @@ -4,7 +4,7 @@ description: Documentation for the BedrockLLMAgent in the Multi-Agent Orchestrat --- ## Overview -The `BedrockLLMAgent` is a powerful and flexible agent class in the Multi-Agent Orchestrator System. It leverages [Amazon Bedrock's Converse API](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html) to interact with various LLMs supported by Amazon Bedrock. +The **Bedrock LLM Agent** is a powerful and flexible agent class in the Multi-Agent Orchestrator System. It leverages [Amazon Bedrock's Converse API](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html) to interact with various LLMs supported by Amazon Bedrock. This agent can handle a wide range of processing tasks, making it suitable for diverse applications such as conversational AI, question-answering systems, and more. @@ -20,11 +20,11 @@ This agent can handle a wide range of processing tasks, making it suitable for d ## Creating a BedrockLLMAgent -By default, the Bedrock LLM Agent uses the `anthropic.claude-3-haiku-20240307-v1:0` model. +By default, the **Bedrock LLM Agent** uses the `anthropic.claude-3-haiku-20240307-v1:0` model. ### Basic Example -To create a new `BedrockLLMAgent` with only the required parameters, use the following code: +To create a new **Bedrock LLM Agent** with only the required parameters, use the following code: import { Tabs, TabItem } from '@astrojs/starlight/components'; @@ -55,7 +55,7 @@ In this basic example, only the `name` and `description` are provided, which are ### Advanced Example -For more complex use cases, you can create a BedrockLLMAgent with all available options. All parameters except `name` and `description` are optional: +For more complex use cases, you can create a **Bedrock LLM Agent** with all available options. All parameters except `name` and `description` are optional: @@ -177,7 +177,6 @@ For more complex use cases, you can create a BedrockLLMAgent with all available ## Setting a New Prompt -You can update the agent's system prompt at any time using the `set_system_prompt` method: @@ -214,7 +213,7 @@ This method allows you to dynamically change the agent's behavior and focus with ## Adding the Agent to the Orchestrator -To integrate the BedrockLLMAgent into your Multi-Agent Orchestrator, follow these steps: +To integrate the **Bedrock LLM Agent** into your orchestrator, follow these steps: 1. First, ensure you have created an instance of the orchestrator: @@ -273,4 +272,6 @@ To integrate the BedrockLLMAgent into your Multi-Agent Orchestrator, follow thes -By leveraging the `BedrockLLMAgent`, you can create sophisticated, context-aware AI agents capable of handling a wide range of tasks and interactions, all powered by the latest LLM models available through Amazon Bedrock. \ No newline at end of file +--- + +By leveraging the **Bedrock LLM Agent**, you can create sophisticated, context-aware AI agents capable of handling a wide range of tasks and interactions, all powered by the latest LLM models available through Amazon Bedrock. 
\ No newline at end of file diff --git a/docs/src/content/docs/agents/built-in/bedrock-translator-agent.mdx b/docs/src/content/docs/agents/built-in/bedrock-translator-agent.mdx new file mode 100644 index 0000000..06123e6 --- /dev/null +++ b/docs/src/content/docs/agents/built-in/bedrock-translator-agent.mdx @@ -0,0 +1,276 @@ +--- +title: Bedrock Translator Agent +description: Documentation for the Bedrock Translator Agent in the Multi-Agent Orchestrator System +--- + +The `BedrockTranslatorAgent` uses Amazon Bedrock's language models to translate text between different languages. + +## Key Features + +- Utilizes Amazon Bedrock's language models +- Supports translation between multiple languages +- Allows dynamic setting of source and target languages +- Can be used standalone or as part of a [ChainAgent](/multi-agent-orchestrator/agents/built-in/chain-agent) +- Configurable inference parameters for fine-tuned control + +## Creating a Bedrock Translator Agent + +### Basic Example + +To create a new `BedrockTranslatorAgent` with minimal configuration: + +import { Tabs, TabItem } from '@astrojs/starlight/components'; + + + + ```typescript + import { BedrockTranslatorAgent, BedrockTranslatorAgentOptions } from 'multi-agent-orchestrator'; + + const agent = new BedrockTranslatorAgent({ + name: 'BasicTranslator', + description: 'Translates text to English', + targetLanguage: 'English' + }); + ``` + + + ```python + from multi_agent_orchestrator.agents import BedrockTranslatorAgent, BedrockTranslatorAgentOptions + + agent = BedrockTranslatorAgent(BedrockTranslatorAgentOptions( + name='BasicTranslator', + description='Translates text to English', + target_language='English' + )) + ``` + + + +### Advanced Example + +For more complex use cases, you can create a BedrockTranslatorAgent with custom settings: + + + + ```typescript + import { BedrockTranslatorAgent, BedrockTranslatorAgentOptions, BEDROCK_MODEL_ID_CLAUDE_3_SONNET } from 'multi-agent-orchestrator'; + + const options: BedrockTranslatorAgentOptions = { + name: 'AdvancedTranslator', + description: 'Advanced translator with custom settings', + sourceLanguage: 'French', + targetLanguage: 'German', + modelId: BEDROCK_MODEL_ID_CLAUDE_3_SONNET, + region: 'us-west-2', + inferenceConfig: { + maxTokens: 2000, + temperature: 0.1, + topP: 0.95, + stopSequences: ['###'] + } + }; + + const agent = new BedrockTranslatorAgent(options); + ``` + + + ```python + from multi_agent_orchestrator.agents import BedrockTranslatorAgent, BedrockTranslatorAgentOptions + from multi_agent_orchestrator.types import BEDROCK_MODEL_ID_CLAUDE_3_SONNET + + options = BedrockTranslatorAgentOptions( + name='AdvancedTranslator', + description='Advanced translator with custom settings', + source_language='French', + target_language='German', + model_id=BEDROCK_MODEL_ID_CLAUDE_3_SONNET, + region='us-west-2', + inference_config={ + 'maxTokens': 2000, + 'temperature': 0.1, + 'topP': 0.95, + 'stopSequences': ['###'] + } + ) + + agent = BedrockTranslatorAgent(options) + ``` + + + +## Dynamic Language Setting + +To set the language during the invocation: + + + + ```typescript + import { MultiAgentOrchestrator, BedrockTranslatorAgent } from 'multi-agent-orchestrator'; + + const translator = new BedrockTranslatorAgent({ + name: 'DynamicTranslator', + description: 'Translator with dynamically set languages' + }); + + const orchestrator = new MultiAgentOrchestrator(); + orchestrator.addAgent(translator); + + async function translateWithDynamicLanguages(text: string, fromLang: string, toLang: 
string) { + translator.setSourceLanguage(fromLang); + translator.setTargetLanguage(toLang); + + const response = await orchestrator.routeRequest( + text, + 'user123', + 'session456' + ); + + console.log(`Translated from ${fromLang} to ${toLang}:`, response); + } + + // Usage + translateWithDynamicLanguages("Hello, world!", "English", "French"); + translateWithDynamicLanguages("Bonjour le monde!", "French", "Spanish"); + ``` + + + ```python + from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator + from multi_agent_orchestrator.agents import BedrockTranslatorAgent, BedrockTranslatorAgentOptions + + translator = BedrockTranslatorAgent(BedrockTranslatorAgentOptions( + name='DynamicTranslator', + description='Translator with dynamically set languages' + )) + + orchestrator = MultiAgentOrchestrator() + orchestrator.add_agent(translator) + + async def translate_with_dynamic_languages(text: str, from_lang: str, to_lang: str): + translator.set_source_language(from_lang) + translator.set_target_language(to_lang) + + response = await orchestrator.route_request( + text, + 'user123', + 'session456' + ) + + print(f"Translated from {from_lang} to {to_lang}:", response) + + # Usage + import asyncio + + asyncio.run(translate_with_dynamic_languages("Hello, world!", "English", "French")) + asyncio.run(translate_with_dynamic_languages("Bonjour le monde!", "French", "Spanish")) + ``` + + + +## Usage with ChainAgent + +The `BedrockTranslatorAgent` can be effectively used within a `ChainAgent` for complex multilingual processing workflows. Here's an example that demonstrates translating user input and processing it: + + + + ```typescript + import { MultiAgentOrchestrator, ChainAgent, BedrockTranslatorAgent, BedrockLLMAgent } from 'multi-agent-orchestrator'; + + // Create translator agents + const translatorToEnglish = new BedrockTranslatorAgent({ + name: 'TranslatorToEnglish', + description: 'Translates input to English', + targetLanguage: 'English' + }); + + // Create a processing agent (e.g., a BedrockLLMAgent) + const processor = new BedrockLLMAgent({ + name: 'EnglishProcessor', + description: 'Processes text in English' + }); + + // Create a ChainAgent + const chainAgent = new ChainAgent({ + name: 'TranslateProcessTranslate', + description: 'Translates, processes, and translates back', + agents: [translatorToEnglish, processor] + }); + + const orchestrator = new MultiAgentOrchestrator(); + orchestrator.addAgent(chainAgent); + + // Function to handle user input + async function handleMultilingualInput(input: string, sourceLanguage: string) { + translatorToEnglish.setSourceLanguage(sourceLanguage); + + const response = await orchestrator.routeRequest( + input, + 'user123', + 'session456' + ); + + console.log('Response:', response); + } + + // Usage + handleMultilingualInput("Hola, ¿cómo estás?", "Spanish"); + ``` + + + ```python + from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator + from multi_agent_orchestrator.agents import ChainAgent, BedrockTranslatorAgent, BedrockLLMAgent + from multi_agent_orchestrator.agents import ChainAgentOptions, BedrockTranslatorAgentOptions, BedrockLLMAgentOptions + + # Create translator agents + translator_to_english = BedrockTranslatorAgent(BedrockTranslatorAgentOptions( + name='TranslatorToEnglish', + description='Translates input to English', + target_language='English' + )) + + # Create a processing agent (e.g., a BedrockLLMAgent) + processor = BedrockLLMAgent(BedrockLLMAgentOptions( + name='EnglishProcessor', + description='Processes 
text in English' + )) + + # Create a ChainAgent + chain_agent = ChainAgent(ChainAgentOptions( + name='TranslateProcessTranslate', + description='Translates, processes, and translates back', + agents=[translator_to_english, processor] + )) + + orchestrator = MultiAgentOrchestrator() + orchestrator.add_agent(chain_agent) + + # Function to handle user input + async def handle_multilingual_input(input_text: str, source_language: str): + translator_to_english.set_source_language(source_language) + + response = await orchestrator.route_request( + input_text, + 'user123', + 'session456' + ) + + print('Response:', response) + + # Usage + import asyncio + + asyncio.run(handle_multilingual_input("Hola, ¿cómo estás?", "Spanish")) + ``` + + + +In this example: +1. The first translator agent converts the input to English. +2. The processor agent (e.g., a `BedrockLLMAgent`) processes the English text. + +This setup allows for seamless multilingual processing, where the core logic can be implemented in English while supporting input and output in various languages. + +--- + +By leveraging the `BedrockTranslatorAgent`, you can create sophisticated multilingual applications and workflows, enabling seamless communication and processing across language barriers in your Multi-Agent Orchestrator system. \ No newline at end of file diff --git a/docs/src/content/docs/agents/built-in/chain-agent.mdx b/docs/src/content/docs/agents/built-in/chain-agent.mdx new file mode 100644 index 0000000..7402d74 --- /dev/null +++ b/docs/src/content/docs/agents/built-in/chain-agent.mdx @@ -0,0 +1,208 @@ +--- +title: Chain Agent +description: Documentation for the Chain Agent in the Multi-Agent Orchestrator System +--- +The `ChainAgent` is an agent class in the Multi-Agent Orchestrator System that allows for the sequential execution of multiple agents. It processes a request by passing the output of one agent as input to the next, creating a chain of agent interactions. + +## Creating a ChainAgent + +### Basic Example + +Here's how to create a ChainAgent with only the required parameters: + +import { Tabs, TabItem } from '@astrojs/starlight/components'; + + + + ```typescript + import { ChainAgent, ChainAgentOptions } from 'multi-agent-orchestrator'; + import { BedrockLLMAgent } from 'multi-agent-orchestrator'; + + const agent1 = new BedrockLLMAgent({ + name: 'Agent 1', + description: '..AGENT DESCRIPTION..' + }); + + const agent2 = new BedrockLLMAgent({ + name: 'Agent 2', + description: '..AGENT DESCRIPTION..' + }); + + const chainAgent = new ChainAgent({ + name: 'Chain Tech Agent', + description: 'Specializes in technology areas including software development, hardware, AI, cybersecurity, blockchain, cloud computing, emerging tech innovations, and pricing/costs related to technology products and services.', + agents: [agent1, agent2] + }); + ``` + + + ```python + from multi_agent_orchestrator.agents import ChainAgent, ChainAgentOptions + from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions + + agent1 = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Agent 1', + description='..AGENT DESCRIPTION..' + )) + + agent2 = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Agent 2', + description='..AGENT DESCRIPTION..' 
+ )) + + chain_agent = ChainAgent(ChainAgentOptions( + name='BasicChainAgent', + description='A simple chain of multiple agents', + agents=[agent1, agent2] + )) + ``` + + + +### Intermediate Example + +This example shows how to create a ChainAgent with a custom default output: + + + + ```typescript + import { ChainAgent, ChainAgentOptions } from 'multi-agent-orchestrator'; + import { BedrockLLMAgent } from 'multi-agent-orchestrator'; + + const agent1 = new BedrockLLMAgent({ + name: 'Agent 1', + description: '..AGENT DESCRIPTION..' + }); + + const agent2 = new BedrockLLMAgent({ + name: 'Agent 2', + description: '..AGENT DESCRIPTION..', + streaming: true + }); + + const chainAgent = new ChainAgent({ + name: 'IntermediateChainAgent', + description: 'A chain of agents with custom default output', + agents: [agent1, agent2], + defaultOutput: 'The chain encountered an issue during processing.' + }); + ``` + + + ```python + from multi_agent_orchestrator.agents import ChainAgent, ChainAgentOptions + from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions + + agent1 = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Agent 1', + description='..AGENT DESCRIPTION..' + )) + + agent2 = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Agent 2', + description='..AGENT DESCRIPTION..' + )) + + chain_agent = ChainAgent(ChainAgentOptions( + name='IntermediateChainAgent', + description='A chain of agents with custom default output', + agents=[agent1, agent2], + default_output='The chain encountered an issue during processing.' + )) + ``` + + + +### Advanced Example + +For more complex use cases, you can create a ChainAgent with all available options: + + + + ```typescript + import { ChainAgent, ChainAgentOptions } from 'multi-agent-orchestrator'; + import { BedrockLLMAgent } from 'multi-agent-orchestrator'; + + const agent1 = new BedrockLLMAgent({ + name: 'Agent 1', + description: '..AGENT DESCRIPTION..' + }); + + const agent2 = new BedrockLLMAgent({ + name: 'Agent 2', + description: '..AGENT DESCRIPTION..', + streaming: true + }); + + const options: ChainAgentOptions = { + name: 'AdvancedChainAgent', + description: 'A sophisticated chain of agents with all options', + agents: [agent1, agent2], + defaultOutput: 'The chain processing encountered an issue.', + saveChat: true + }; + + const chainAgent = new ChainAgent(options); + ``` + + + ```python + from multi_agent_orchestrator.agents import ChainAgent, ChainAgentOptions + from multi_agent_orchestrator.agents import BedrockLLMAgent, BedrockLLMAgentOptions + + agent1 = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Agent 1', + description='..AGENT DESCRIPTION..' 
+ )) + + agent2 = BedrockLLMAgent(BedrockLLMAgentOptions( + name='Agent 2', + description='..AGENT DESCRIPTION..', + streaming=True + )) + + options = ChainAgentOptions( + name='AdvancedChainAgent', + description='A sophisticated chain of agents with all options', + agents=[agent1, agent2], + default_output='The chain processing encountered an issue.', + save_chat=True + ) + + chain_agent = ChainAgent(options) + ``` + + + +## Integrating ChainAgent into the Multi-Agent Orchestrator + +To integrate the ChainAgent into your Multi-Agent Orchestrator: + + + + ```typescript + import { MultiAgentOrchestrator } from "multi-agent-orchestrator"; + + const orchestrator = new MultiAgentOrchestrator(); + orchestrator.addAgent(chainAgent); + ``` + + + ```python + from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator + + orchestrator = MultiAgentOrchestrator() + orchestrator.add_agent(chain_agent) + ``` + + + +## Streaming Responses + +The ChainAgent supports streaming responses only for the last agent in the chain. + +This design ensures efficient processing through the chain while still enabling streaming capabilities for the end result. + +--- + +By leveraging the ChainAgent, you can create sophisticated, multi-step processing pipelines within your Multi-Agent Orchestrator system, allowing for complex interactions and transformations of user inputs, with the added flexibility of streaming output from the final processing step. \ No newline at end of file diff --git a/docs/src/content/docs/agents/built-in/comprehend-filter-agent.mdx b/docs/src/content/docs/agents/built-in/comprehend-filter-agent.mdx new file mode 100644 index 0000000..adbddb2 --- /dev/null +++ b/docs/src/content/docs/agents/built-in/comprehend-filter-agent.mdx @@ -0,0 +1,421 @@ +--- +title: Comprehend Filter Agent +description: Documentation for the Comprehend Filter Agent in the Multi-Agent Orchestrator System +--- +The `ComprehendFilterAgent` is an agent class in the Multi-Agent Orchestrator System that uses [Amazon Comprehend](https://aws.amazon.com/comprehend/?nc1=h_ls) to analyze and filter content based on sentiment, Personally Identifiable Information (PII), and toxicity. + +It can be used as a standalone agent within the Multi-Agent Orchestrator or as part of a chain in the ChainAgent. + +When used in a [ChainAgent](/multi-agent-orchestrator/agents/built-in/chain-agent) configuration, it's particularly effective as the first agent in the list. In this setup, it can check the user input against all configured filters, and if the content passes these checks, it will forward the original user input to the next agent in the chain. This allows for a robust content moderation system that can be seamlessly integrated into more complex processing pipelines, ensuring that only appropriate content is processed by subsequent agents. 
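+
+ As a quick, condensed sketch of this filter-first pattern (the agent names and descriptions below are illustrative; the full walkthrough appears in the Usage with ChainAgent section later on this page), the wiring looks like this:
+
+ ```typescript
+ import { MultiAgentOrchestrator, ChainAgent, ComprehendFilterAgent, BedrockLLMAgent } from 'multi-agent-orchestrator';
+
+ // The filter runs first: if the input passes all checks, the original text is forwarded to the next agent.
+ const moderatedChain = new ChainAgent({
+   name: 'ModeratedAssistant',
+   description: 'Moderates user input before generating a response',
+   agents: [
+     new ComprehendFilterAgent({ name: 'ContentFilter', description: 'Blocks inappropriate input' }),
+     new BedrockLLMAgent({ name: 'Assistant', description: 'Answers requests that pass moderation' })
+   ]
+ });
+
+ const orchestrator = new MultiAgentOrchestrator();
+ orchestrator.addAgent(moderatedChain);
+ ```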
+ +## Key Features + +- Content analysis using Amazon Comprehend +- Configurable checks for sentiment, PII, and toxicity +- Customizable thresholds for sentiment and toxicity +- Support for multiple languages +- Ability to add custom content checks + +## Creating a Comprehend Filter Agent + +### Basic Example + +To create a new `ComprehendFilterAgent` with default settings: + +import { Tabs, TabItem } from '@astrojs/starlight/components'; + + + + ```typescript + import { ComprehendFilterAgent, ComprehendFilterAgentOptions } from 'multi-agent-orchestrator'; + + const agent = new ComprehendFilterAgent({ + name: 'ContentModerator', + description: 'Analyzes and filters content using Amazon Comprehend' + }); + ``` + + + ```python + from multi_agent_orchestrator.agents import ComprehendFilterAgent, ComprehendFilterAgentOptions + + agent = ComprehendFilterAgent(ComprehendFilterAgentOptions( + name='ContentModerator', + description='Analyzes and filters content using Amazon Comprehend' + )) + ``` + + + +### Advanced Example + +For more complex use cases, you can create a `ComprehendFilterAgent` with custom settings: + + + + ```typescript + import { ComprehendFilterAgent, ComprehendFilterAgentOptions } from 'multi-agent-orchestrator'; + + const options: ComprehendFilterAgentOptions = { + name: 'AdvancedContentModerator', + description: 'Advanced content moderation with custom settings', + region: 'us-west-2', + enableSentimentCheck: true, + enablePiiCheck: true, + enableToxicityCheck: true, + sentimentThreshold: 0.8, + toxicityThreshold: 0.6, + allowPii: false, + languageCode: 'en' + }; + + const agent = new ComprehendFilterAgent(options); + ``` + + + ```python + from multi_agent_orchestrator.agents import ComprehendFilterAgent, ComprehendFilterAgentOptions + + options = ComprehendFilterAgentOptions( + name='AdvancedContentModerator', + description='Advanced content moderation with custom settings', + region='us-west-2', + enable_sentiment_check=True, + enable_pii_check=True, + enable_toxicity_check=True, + sentiment_threshold=0.8, + toxicity_threshold=0.6, + allow_pii=False, + language_code='en' + ) + + agent = ComprehendFilterAgent(options) + ``` + + + +## Integrating Comprehend Filter Agent + +To integrate the `ComprehendFilterAgent` into your orchestrator: + + + + ```typescript + import { MultiAgentOrchestrator } from "multi-agent-orchestrator"; + + const orchestrator = new MultiAgentOrchestrator(); + orchestrator.addAgent(agent); + ``` + + + ```python + from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator + + orchestrator = MultiAgentOrchestrator() + orchestrator.add_agent(agent) + ``` + + + +## Adding Custom Checks + +This example demonstrates how to add a **Custom Check** to the `ComprehendFilterAgent`: + + + + ```typescript + import { ComprehendFilterAgent, ComprehendFilterAgentOptions } from 'multi-agent-orchestrator'; + + const filterAgent = new ComprehendFilterAgent({ + name: 'AdvancedContentFilter', + description: 'Advanced content filter with custom checks' + }); + + // Add a custom check for specific keywords + filterAgent.addCustomCheck(async (text: string) => { + const keywords = ['banned', 'inappropriate', 'offensive']; + for (const keyword of keywords) { + if (text.toLowerCase().includes(keyword)) { + return `Banned keyword detected: ${keyword}`; + } + } + return null; + }); + + const orchestrator = new MultiAgentOrchestrator(); + orchestrator.addAgent(filterAgent); + + const response = await orchestrator.routeRequest( + "This message contains a banned word.", 
+ "user789", + "session101" + ); + + if (response) { + console.log("Content passed all checks"); + } else { + console.log("Content was flagged by the filter"); + } + ``` + + + ```python + from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator + from multi_agent_orchestrator.agents import ComprehendFilterAgent, ComprehendFilterAgentOptions + + filter_agent = ComprehendFilterAgent(ComprehendFilterAgentOptions( + name='AdvancedContentFilter', + description='Advanced content filter with custom checks' + )) + + # Add a custom check for specific keywords + async def custom_keyword_check(text: str) -> Optional[str]: + keywords = ['banned', 'inappropriate', 'offensive'] + for keyword in keywords: + if keyword in text.lower(): + return f"Banned keyword detected: {keyword}" + return None + + filter_agent.add_custom_check(custom_keyword_check) + + orchestrator = MultiAgentOrchestrator() + orchestrator.add_agent(filter_agent) + + response = await orchestrator.route_request( + "This message contains a banned word.", + "user789", + "session101" + ) + + if response: + print("Content passed all checks") + else: + print("Content was flagged by the filter") + ``` + + + +## Dynamic Language Detection and Handling + +The `ComprehendFilterAgent` offers flexible language handling capabilities. You can specify the language either at initialization or dynamically during invocation. Additionally, it supports automatic language detection, allowing it to adapt to content in various languages without manual specification. + +This example demonstrates dynamic language detection and handling: + + + + ```typescript + import { MultiAgentOrchestrator, ComprehendFilterAgent } from 'multi-agent-orchestrator'; + import { ComprehendClient, DetectDominantLanguageCommand } from "@aws-sdk/client-comprehend"; + + const filterAgent = new ComprehendFilterAgent({ + name: 'MultilingualContentFilter', + description: 'Filters content in multiple languages' + }); + + const orchestrator = new MultiAgentOrchestrator(); + orchestrator.addAgent(filterAgent); + + async function detectLanguage(text: string): Promise { + const comprehendClient = new ComprehendClient({ region: "us-east-1" }); + const command = new DetectDominantLanguageCommand({ Text: text }); + const response = await comprehendClient.send(command); + return response.Languages[0].LanguageCode; + } + + let detectedLanguage: string | null = null; + + async function processUserInput(userInput: string, userId: string, sessionId: string): Promise { + if (!detectedLanguage) { + detectedLanguage = await detectLanguage(userInput); + console.log(`Detected language: ${detectedLanguage}`); + } + + try { + const response = await orchestrator.routeRequest( + userInput, + userId, + sessionId, + { languageCode: detectedLanguage } + ); + + console.log("Processed response:", response); + } catch (error) { + console.error("Error:", error); + } + } + + // Example usage + processUserInput("Hello, world!", "user123", "session456"); + // Subsequent calls will use the same detected language + processUserInput("How are you?", "user123", "session456"); + ``` + + + ```python + from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator + from multi_agent_orchestrator.agents import ComprehendFilterAgent, ComprehendFilterAgentOptions + import boto3 + import asyncio + + filter_agent = ComprehendFilterAgent(ComprehendFilterAgentOptions( + name='MultilingualContentFilter', + description='Filters content in multiple languages' + )) + + orchestrator = MultiAgentOrchestrator() + 
orchestrator.add_agent(filter_agent) + + def detect_language(text: str) -> str: + comprehend = boto3.client('comprehend', region_name='us-east-1') + response = comprehend.detect_dominant_language(Text=text) + return response['Languages'][0]['LanguageCode'] + + detected_language = None + + async def process_user_input(user_input: str, user_id: str, session_id: str): + global detected_language + if not detected_language: + detected_language = detect_language(user_input) + print(f"Detected language: {detected_language}") + + try: + response = await orchestrator.route_request( + user_input, + user_id, + session_id, + additional_params={"language_code": detected_language} + ) + + print("Processed response:", response) + except Exception as error: + print("Error:", error) + + # Example usage + asyncio.run(process_user_input("Hello, world!", "user123", "session456")) + # Subsequent calls will use the same detected language + asyncio.run(process_user_input("How are you?", "user123", "session456")) + ``` + + + +## Usage with ChainAgent + +This example demonstrates how to use the `ComprehendFilterAgent` as part of a `ChainAgent` configuration: + + + + ```typescript + import { MultiAgentOrchestrator, ChainAgent, ComprehendFilterAgent, BedrockLLMAgent } from 'multi-agent-orchestrator'; + + // Create a ComprehendFilterAgent + const filterAgent = new ComprehendFilterAgent({ + name: 'ContentFilter', + description: 'Filters inappropriate content', + enableSentimentCheck: true, + enablePiiCheck: true, + enableToxicityCheck: true, + sentimentThreshold: 0.7, + toxicityThreshold: 0.6 + }); + + // Create a BedrockLLMAgent (or any other agent you want to use after filtering) + const llmAgent = new BedrockLLMAgent({ + name: 'LLMProcessor', + description: 'Processes filtered content using a language model' + }); + + // Create a ChainAgent that combines the filter and LLM agents + const chainAgent = new ChainAgent({ + name: 'FilteredLLMChain', + description: 'Chain that filters content before processing with LLM', + agents: [filterAgent, llmAgent] + }); + + // Add the chain agent to the orchestrator + const orchestrator = new MultiAgentOrchestrator(); + orchestrator.addAgent(chainAgent); + + // Use the chain + const response = await orchestrator.routeRequest( + "Process this message after ensuring it's appropriate.", + "user123", + "session456" + ); + + if (response) { + console.log("Message processed successfully:", response); + } else { + console.log("Message was filtered out due to inappropriate content"); + } + ``` + + + ```python + from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator + from multi_agent_orchestrator.agents import ChainAgent, ComprehendFilterAgent, BedrockLLMAgent + from multi_agent_orchestrator.agents import ChainAgentOptions, ComprehendFilterAgentOptions, BedrockLLMAgentOptions + + # Create a ComprehendFilterAgent + filter_agent = ComprehendFilterAgent(ComprehendFilterAgentOptions( + name='ContentFilter', + description='Filters inappropriate content', + enable_sentiment_check=True, + enable_pii_check=True, + enable_toxicity_check=True, + sentiment_threshold=0.7, + toxicity_threshold=0.6 + )) + + # Create a BedrockLLMAgent (or any other agent you want to use after filtering) + llm_agent = BedrockLLMAgent(BedrockLLMAgentOptions( + name='LLMProcessor', + description='Processes filtered content using a language model' + )) + + # Create a ChainAgent that combines the filter and LLM agents + chain_agent = ChainAgent(ChainAgentOptions( + name='FilteredLLMChain', + 
description='Chain that filters content before processing with LLM', + agents=[filter_agent, llm_agent] + )) + + # Add the chain agent to the orchestrator + orchestrator = MultiAgentOrchestrator() + orchestrator.add_agent(chain_agent) + + # Use the chain + response = await orchestrator.route_request( + "Process this message after ensuring it's appropriate.", + "user123", + "session456" + ) + + if response: + print("Message processed successfully:", response) + else: + print("Message was filtered out due to inappropriate content") + ``` + + + +## Configuration Options + +The `ComprehendFilterAgent` supports the following configuration options: + +- `enableSentimentCheck`: Enable sentiment analysis (default: true) +- `enablePiiCheck`: Enable PII detection (default: true) +- `enableToxicityCheck`: Enable toxicity detection (default: true) +- `sentimentThreshold`: Threshold for negative sentiment (default: 0.7) +- `toxicityThreshold`: Threshold for toxic content (default: 0.7) +- `allowPii`: Allow PII in content (default: false) +- `languageCode`: ISO 639-1 language code for analysis (default: 'en') + +## Supported Languages + +The `ComprehendFilterAgent` supports the following languages: + +'en' (English), 'es' (Spanish), 'fr' (French), 'de' (German), 'it' (Italian), 'pt' (Portuguese), 'ar' (Arabic), 'hi' (Hindi), 'ja' (Japanese), 'ko' (Korean), 'zh' (Chinese Simplified), 'zh-TW' (Chinese Traditional) + +--- + +By leveraging the `ComprehendFilterAgent`, you can implement robust content moderation in your Multi-Agent Orchestrator system, ensuring safe and appropriate interactions while leveraging the power of Amazon Comprehend for advanced content analysis. diff --git a/python/src/multi_agent_orchestrator/agents/chain_agent.py b/python/src/multi_agent_orchestrator/agents/chain_agent.py index 269b833..2f233f8 100644 --- a/python/src/multi_agent_orchestrator/agents/chain_agent.py +++ b/python/src/multi_agent_orchestrator/agents/chain_agent.py @@ -27,12 +27,11 @@ async def process_request( ) -> Union[ConversationMessage, AsyncIterable[any]]: current_input = input_text final_response: Union[ConversationMessage, AsyncIterable[any]] - print(f"Processing chain with {len(self.agents)} agents") for i, agent in enumerate(self.agents): is_last_agent = i == len(self.agents) - 1 try: - print(f"Input for agent {i}: {current_input}") + #print(f"Input for agent {i}: {current_input}") response = await agent.process_request( current_input, user_id, @@ -44,7 +43,7 @@ async def process_request( if response.content and 'text' in response.content[0]: current_input = response.content[0]['text'] final_response = response - print(f"Output from agent {i}: {current_input}") + #print(f"Output from agent {i}: {current_input}") else: Logger.logger.warning(f"Agent {agent.name} returned no text content.") return self.create_default_response() diff --git a/python/src/multi_agent_orchestrator/classifiers/anthropic_classifier.py b/python/src/multi_agent_orchestrator/classifiers/anthropic_classifier.py index 24c9ce7..ce79b21 100644 --- a/python/src/multi_agent_orchestrator/classifiers/anthropic_classifier.py +++ b/python/src/multi_agent_orchestrator/classifiers/anthropic_classifier.py @@ -4,7 +4,8 @@ from multi_agent_orchestrator.utils.logger import Logger from multi_agent_orchestrator.types import ConversationMessage from multi_agent_orchestrator.classifiers import Classifier, ClassifierResult - +import logging +logging.getLogger("httpx").setLevel(logging.WARNING) ANTHROPIC_MODEL_ID_CLAUDE_3_5_SONNET = 
"claude-3-5-sonnet-20240620" diff --git a/python/src/multi_agent_orchestrator/orchestrator.py b/python/src/multi_agent_orchestrator/orchestrator.py index 7c906c6..2eb8627 100644 --- a/python/src/multi_agent_orchestrator/orchestrator.py +++ b/python/src/multi_agent_orchestrator/orchestrator.py @@ -90,7 +90,7 @@ async def dispatch_to_agent(self, agent_chat_history = await self.storage.fetch_chat(user_id, session_id, selected_agent.id) self.logger.print_chat_history(agent_chat_history, selected_agent.id) - self.logger.info(f"Routing intent '{user_input}' to {selected_agent.id} ...") + #self.logger.info(f"Routing intent '{user_input}' to {selected_agent.id} ...") response = await self.measure_execution_time( f"Agent {selected_agent.name} | Processing request",