From 48c28c3e9316eba5aaff499a7335502311f0aa1d Mon Sep 17 00:00:00 2001 From: Ivan Date: Wed, 25 Dec 2024 12:13:16 +0400 Subject: [PATCH 01/14] Initial commit --- .../backend/backend/blocks/llm.py | 38 +++++++++++-------- .../backend/backend/data/credit.py | 9 ++--- 2 files changed, 27 insertions(+), 20 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index 50a9bfb3c626..cf111dc6f9cb 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -27,7 +27,9 @@ logger = logging.getLogger(__name__) -LLMProviderName = Literal["anthropic", "groq", "openai", "ollama", "open_router"] +LLMProviderName = Literal[ + "anthropic", "groq", "openai", "ollama", "open_router", "aiml" +] AICredentials = CredentialsMetaInput[LLMProviderName, Literal["api_key"]] TEST_CREDENTIALS = APIKeyCredentials( @@ -92,6 +94,10 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta): # Anthropic models CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest" CLAUDE_3_HAIKU = "claude-3-haiku-20240307" + # Aiml models + QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo" + LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct" + LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo" # Groq models LLAMA3_8B = "llama3-8b-8192" LLAMA3_70B = "llama3-70b-8192" @@ -477,16 +483,19 @@ def parse_response(resp: str) -> tuple[dict[str, Any], str | None]: if input_data.expected_format: parsed_dict, parsed_error = parse_response(response_text) if not parsed_error: - yield "response", { - k: ( - json.loads(v) - if isinstance(v, str) - and v.startswith("[") - and v.endswith("]") - else (", ".join(v) if isinstance(v, list) else v) - ) - for k, v in parsed_dict.items() - } + yield ( + "response", + { + k: ( + json.loads(v) + if isinstance(v, str) + and v.startswith("[") + and v.endswith("]") + else (", ".join(v) if isinstance(v, list) else v) + ) + for k, v in parsed_dict.items() + }, + ) return else: yield "response", {"response": response_text} @@ -753,9 +762,7 @@ def _combine_summaries( chunk_overlap=input_data.chunk_overlap, ), credentials=credentials, - ).send(None)[ - 1 - ] # Get the first yielded value + ).send(None)[1] # Get the first yielded value class AIConversationBlock(Block): @@ -807,7 +814,8 @@ def __init__(self): "The 2020 World Series was played at Globe Life Field in Arlington, Texas.", ), test_mock={ - "llm_call": lambda *args, **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." + "llm_call": lambda *args, + **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." 
}, ) diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index b3f8fbce5d4d..040e11587922 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ b/autogpt_platform/backend/backend/data/credit.py @@ -1,15 +1,14 @@ from abc import ABC, abstractmethod from datetime import datetime, timezone -from prisma import Json -from prisma.enums import UserBlockCreditType -from prisma.errors import UniqueViolationError -from prisma.models import UserBlockCredit - from backend.data.block import Block, BlockInput, get_block from backend.data.block_cost_config import BLOCK_COSTS from backend.data.cost import BlockCost, BlockCostType from backend.util.settings import Config +from prisma import Json +from prisma.enums import UserBlockCreditType +from prisma.errors import UniqueViolationError +from prisma.models import UserBlockCredit config = Config() From 359130682b9dfc03b30e6cf3435c939f90898715 Mon Sep 17 00:00:00 2001 From: Ivan Date: Wed, 1 Jan 2025 16:34:42 +0400 Subject: [PATCH 02/14] Integration with aiml api --- autogpt_platform/backend/.env.example | 1 + .../backend/backend/blocks/llm.py | 31 +++- .../backend/backend/data/block_cost_config.py | 23 +++ .../backend/integrations/credentials_store.py | 10 ++ .../backend/backend/util/settings.py | 1 + .../frontend/src/app/profile/page.tsx | 31 ++-- .../app/store/(user)/integrations/page.tsx | 31 ++-- .../integrations/credentials-input.tsx | 18 +-- .../integrations/credentials-provider.tsx | 1 + .../src/lib/autogpt-server-api/types.ts | 17 +-- classic/forge/forge/llm/providers/aiml.py | 135 ++++++++++++++++++ classic/forge/forge/llm/providers/multi.py | 21 ++- classic/forge/forge/llm/providers/schema.py | 36 ++--- docs/content/classic/configuration/options.md | 1 + docs/content/classic/setup/index.md | 13 ++ docs/content/index.md | 1 + 16 files changed, 295 insertions(+), 76 deletions(-) create mode 100644 classic/forge/forge/llm/providers/aiml.py diff --git a/autogpt_platform/backend/.env.example b/autogpt_platform/backend/.env.example index 0dd10e838501..0bd6a41c3388 100644 --- a/autogpt_platform/backend/.env.example +++ b/autogpt_platform/backend/.env.example @@ -63,6 +63,7 @@ GOOGLE_CLIENT_SECRET= # LLM OPENAI_API_KEY= ANTHROPIC_API_KEY= +AIML_API_KEY= GROQ_API_KEY= OPEN_ROUTER_API_KEY= diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index 8a35132ff423..56033f10f85c 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -100,9 +100,11 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta): CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest" CLAUDE_3_HAIKU = "claude-3-haiku-20240307" # Aiml models - QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo" - LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct" - LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo" + AIML_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo" + AIML_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct" + AIML_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo" + AIML_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" + AIML_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo" # Groq models LLAMA3_8B = "llama3-8b-8192" LLAMA3_70B = "llama3-70b-8192" @@ -159,6 +161,11 @@ def context_window(self) -> int: LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385), LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000), LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 
200000), + LlmModel.AIML_QWEN2_5_72B: ModelMetadata("aiml", 32000), + LlmModel.AIML_LLAMA3_1_70B: ModelMetadata("aiml", 128000), + LlmModel.AIML_LLAMA3_3_70B: ModelMetadata("aiml", 128000), + LlmModel.AIML_META_LLAMA_3_1_70B: ModelMetadata("aiml", 131000), + LlmModel.AIML_LLAMA_3_2_3B: ModelMetadata("aiml", 128000), LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192), LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192), LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768), @@ -438,6 +445,24 @@ def llm_call( response.usage.prompt_tokens if response.usage else 0, response.usage.completion_tokens if response.usage else 0, ) + elif provider == "aiml": + client = openai.OpenAI( + base_url="https://api.aimlapi.com/v2", + api_key=credentials.api_key.get_secret_value(), + ) + + completion = client.chat.completions.create( + model=llm_model.value, + messages=prompt, # type: ignore + max_tokens=max_tokens, + ) + + # response = completion.choices[0].message.content + return ( + completion.choices[0].message.content or "", + completion.usage.prompt_tokens if completion.usage else 0, + completion.usage.completion_tokens if completion.usage else 0, + ) else: raise ValueError(f"Unsupported LLM provider: {provider}") diff --git a/autogpt_platform/backend/backend/data/block_cost_config.py b/autogpt_platform/backend/backend/data/block_cost_config.py index 4ed57b2b390d..ec48d5d3c023 100644 --- a/autogpt_platform/backend/backend/data/block_cost_config.py +++ b/autogpt_platform/backend/backend/data/block_cost_config.py @@ -20,6 +20,7 @@ from backend.data.block import Block from backend.data.cost import BlockCost, BlockCostType from backend.integrations.credentials_store import ( + aiml_credentials, anthropic_credentials, did_credentials, groq_credentials, @@ -43,6 +44,11 @@ LlmModel.GPT3_5_TURBO: 1, LlmModel.CLAUDE_3_5_SONNET: 4, LlmModel.CLAUDE_3_HAIKU: 1, + LlmModel.AIML_QWEN2_5_72B: 1, + LlmModel.AIML_LLAMA3_1_70B: 1, + LlmModel.AIML_LLAMA3_3_70B: 1, + LlmModel.AIML_META_LLAMA_3_1_70B: 1, + LlmModel.AIML_LLAMA_3_2_3B: 1, LlmModel.LLAMA3_8B: 1, LlmModel.LLAMA3_70B: 1, LlmModel.MIXTRAL_8X7B: 1, @@ -142,6 +148,23 @@ for model, cost in MODEL_COST.items() if MODEL_METADATA[model].provider == "open_router" ] + # AI/ML Api Models + + [ + BlockCost( + cost_type=BlockCostType.RUN, + cost_filter={ + "model": model, + "credentials": { + "id": aiml_credentials.id, + "provider": aiml_credentials.provider, + "type": aiml_credentials.type, + }, + }, + cost_amount=cost, + ) + for model, cost in MODEL_COST.items() + if MODEL_METADATA[model].provider == "aiml" + ] ) # =============== This is the exhaustive list of cost for each Block =============== # diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py index 7d539b73c476..1ae2d20e4cc7 100644 --- a/autogpt_platform/backend/backend/integrations/credentials_store.py +++ b/autogpt_platform/backend/backend/integrations/credentials_store.py @@ -49,6 +49,13 @@ title="Use Credits for OpenAI", expires_at=None, ) +aiml_credentials = APIKeyCredentials( + id="aad82a89-9794-4ebb-977f-d736aa5260a3", + provider="aiml", + api_key=SecretStr(settings.secrets.aiml_api_key), + title="Use Credits for AI/ML", + expires_at=None, +) anthropic_credentials = APIKeyCredentials( id="24e5d942-d9e3-4798-8151-90143ee55629", provider="anthropic", @@ -98,6 +105,7 @@ ideogram_credentials, replicate_credentials, openai_credentials, + aiml_credentials, anthropic_credentials, groq_credentials, did_credentials, @@ -145,6 
+153,8 @@ def get_all_creds(self, user_id: str) -> list[Credentials]: all_credentials.append(replicate_credentials) if settings.secrets.openai_api_key: all_credentials.append(openai_credentials) + if settings.secrets.aiml_api_key: + all_credentials.append(aiml_credentials) if settings.secrets.anthropic_api_key: all_credentials.append(anthropic_credentials) if settings.secrets.did_api_key: diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 69504f528f3c..5028d7c3348a 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -261,6 +261,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings): ) openai_api_key: str = Field(default="", description="OpenAI API key") + aiml_api_key: str = Field(default="", description="AI/ML API key") anthropic_api_key: str = Field(default="", description="Anthropic API key") groq_api_key: str = Field(default="", description="Groq API key") open_router_api_key: str = Field(default="", description="Open Router API Key") diff --git a/autogpt_platform/frontend/src/app/profile/page.tsx b/autogpt_platform/frontend/src/app/profile/page.tsx index b3097577db3a..add79ed2eb40 100644 --- a/autogpt_platform/frontend/src/app/profile/page.tsx +++ b/autogpt_platform/frontend/src/app/profile/page.tsx @@ -38,11 +38,11 @@ export default function PrivatePage() { const [confirmationDialogState, setConfirmationDialogState] = useState< | { - open: true; - message: string; - onConfirm: () => void; - onReject: () => void; - } + open: true; + message: string; + onConfirm: () => void; + onReject: () => void; + } | { open: false } >({ open: false }); @@ -103,6 +103,7 @@ export default function PrivatePage() { "6b9fc200-4726-4973-86c9-cd526f5ce5db", // Replicate "53c25cb8-e3ee-465c-a4d1-e75a4c899c2a", // OpenAI "24e5d942-d9e3-4798-8151-90143ee55629", // Anthropic + "aad82a89-9794-4ebb-977f-d736aa5260a3", // AI/ML "4ec22295-8f97-4dd1-b42b-2c6957a02545", // Groq "7f7b0654-c36b-4565-8fa7-9a52575dfae2", // D-ID "7f26de70-ba0d-494e-ba76-238e65e7b45f", // Jina @@ -123,16 +124,16 @@ export default function PrivatePage() { const allCredentials = providers ? 
Object.values(providers).flatMap((provider) => - [...provider.savedOAuthCredentials, ...provider.savedApiKeys] - .filter((cred) => !hiddenCredentials.includes(cred.id)) - .map((credentials) => ({ - ...credentials, - provider: provider.provider, - providerName: provider.providerName, - ProviderIcon: providerIcons[provider.provider], - TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], - })), - ) + [...provider.savedOAuthCredentials, ...provider.savedApiKeys] + .filter((cred) => !hiddenCredentials.includes(cred.id)) + .map((credentials) => ({ + ...credentials, + provider: provider.provider, + providerName: provider.providerName, + ProviderIcon: providerIcons[provider.provider], + TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], + })), + ) : []; return ( diff --git a/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx b/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx index a4fa36ab29e3..3ffbcdd070d7 100644 --- a/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx +++ b/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx @@ -38,11 +38,11 @@ export default function PrivatePage() { const [confirmationDialogState, setConfirmationDialogState] = useState< | { - open: true; - message: string; - onConfirm: () => void; - onReject: () => void; - } + open: true; + message: string; + onConfirm: () => void; + onReject: () => void; + } | { open: false } >({ open: false }); @@ -103,6 +103,7 @@ export default function PrivatePage() { "6b9fc200-4726-4973-86c9-cd526f5ce5db", // Replicate "53c25cb8-e3ee-465c-a4d1-e75a4c899c2a", // OpenAI "24e5d942-d9e3-4798-8151-90143ee55629", // Anthropic + "aad82a89-9794-4ebb-977f-d736aa5260a3", // AI/ML "4ec22295-8f97-4dd1-b42b-2c6957a02545", // Groq "7f7b0654-c36b-4565-8fa7-9a52575dfae2", // D-ID "7f26de70-ba0d-494e-ba76-238e65e7b45f", // Jina @@ -123,16 +124,16 @@ export default function PrivatePage() { const allCredentials = providers ? 
Object.values(providers).flatMap((provider) => - [...provider.savedOAuthCredentials, ...provider.savedApiKeys] - .filter((cred) => !hiddenCredentials.includes(cred.id)) - .map((credentials) => ({ - ...credentials, - provider: provider.provider, - providerName: provider.providerName, - ProviderIcon: providerIcons[provider.provider], - TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], - })), - ) + [...provider.savedOAuthCredentials, ...provider.savedApiKeys] + .filter((cred) => !hiddenCredentials.includes(cred.id)) + .map((credentials) => ({ + ...credentials, + provider: provider.provider, + providerName: provider.providerName, + ProviderIcon: providerIcons[provider.provider], + TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], + })), + ) : []; return ( diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx index afd5949a51ee..dd3c8fe6cdd6 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx @@ -52,6 +52,7 @@ export const providerIcons: Record< github: FaGithub, google: FaGoogle, groq: fallbackIcon, + aiml: fallbackIcon, notion: NotionLogoIcon, discord: FaDiscord, d_id: fallbackIcon, @@ -76,14 +77,14 @@ export const providerIcons: Record< export type OAuthPopupResultMessage = { message_type: "oauth_popup_result" } & ( | { - success: true; - code: string; - state: string; - } + success: true; + code: string; + state: string; + } | { - success: false; - message: string; - } + success: false; + message: string; + } ); export const CredentialsInput: FC<{ @@ -177,8 +178,7 @@ export const CredentialsInput: FC<{ console.error("Error in OAuth callback:", error); setOAuthError( // type of error is unkown so we need to use String(error) - `Error in OAuth callback: ${ - error instanceof Error ? error.message : String(error) + `Error in OAuth callback: ${error instanceof Error ? 
error.message : String(error) }`, ); } finally { diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx index 6762f99ad621..b1b3d0f882cc 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx @@ -16,6 +16,7 @@ const CREDENTIALS_PROVIDER_NAMES = Object.values( // --8<-- [start:CredentialsProviderNames] const providerDisplayNames: Record = { + aiml: "AI/ML", anthropic: "Anthropic", discord: "Discord", d_id: "D-ID", diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 9872ace436dc..09e31a70f381 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -102,6 +102,7 @@ export type CredentialsType = "api_key" | "oauth2"; // --8<-- [start:BlockIOCredentialsSubSchema] export const PROVIDER_NAMES = { ANTHROPIC: "anthropic", + AIML: "aiml", D_ID: "d_id", DISCORD: "discord", E2B: "e2b", @@ -149,16 +150,16 @@ export type BlockIONullSubSchema = BlockIOSubSchemaMeta & { type BlockIOCombinedTypeSubSchema = BlockIOSubSchemaMeta & ( | { - allOf: [BlockIOSimpleTypeSubSchema]; - } + allOf: [BlockIOSimpleTypeSubSchema]; + } | { - anyOf: BlockIOSimpleTypeSubSchema[]; - default?: string | number | boolean | null; - } + anyOf: BlockIOSimpleTypeSubSchema[]; + default?: string | number | boolean | null; + } | { - oneOf: BlockIOSimpleTypeSubSchema[]; - default?: string | number | boolean | null; - } + oneOf: BlockIOSimpleTypeSubSchema[]; + default?: string | number | boolean | null; + } ); /* Mirror of backend/data/graph.py:Node */ diff --git a/classic/forge/forge/llm/providers/aiml.py b/classic/forge/forge/llm/providers/aiml.py new file mode 100644 index 000000000000..f6052837f420 --- /dev/null +++ b/classic/forge/forge/llm/providers/aiml.py @@ -0,0 +1,135 @@ +from __future__ import annotations + +import enum +import logging +from typing import Any, Optional + +import tiktoken +from pydantic import SecretStr + +from forge.models.config import UserConfigurable + +from ._openai_base import BaseOpenAIChatProvider +from .schema import ( + ChatModelInfo, + ModelProviderBudget, + ModelProviderConfiguration, + ModelProviderCredentials, + ModelProviderName, + ModelProviderSettings, + ModelTokenizer, +) + + +class AimlModelName(str, enum.Enum): + AIML_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo" + AIML_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct" + AIML_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo" + AIML_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo" + AIML_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" + + +AIML_CHAT_MODELS = { + info.name: info + for info in [ + ChatModelInfo( + name=AimlModelName.AIML_QWEN2_5_72B, + provider_name=ModelProviderName.AIML, + prompt_token_cost=1.26 / 1e6, + completion_token_cost=1.26 / 1e6, + max_tokens=32000, + has_function_call_api=False, + ), + ChatModelInfo( + name=AimlModelName.AIML_LLAMA3_1_70B, + provider_name=ModelProviderName.AIML, + prompt_token_cost=0.368 / 1e6, + completion_token_cost=0.42 / 1e6, + max_tokens=128000, + has_function_call_api=False, + ), + ChatModelInfo( + name=AimlModelName.AIML_LLAMA3_3_70B, + provider_name=ModelProviderName.AIML, + prompt_token_cost=0.924 / 1e6, + completion_token_cost=0.924 / 
1e6, + max_tokens=128000, + has_function_call_api=False, + ), + ChatModelInfo( + name=AimlModelName.AIML_META_LLAMA_3_1_70B, + provider_name=ModelProviderName.AIML, + prompt_token_cost=0.063 / 1e6, + completion_token_cost=0.063 / 1e6, + max_tokens=131000, + has_function_call_api=False, + ), + ChatModelInfo( + name=AimlModelName.AIML_LLAMA_3_2_3B, + provider_name=ModelProviderName.AIML, + prompt_token_cost=0.924 / 1e6, + completion_token_cost=0.924 / 1e6, + max_tokens=128000, + has_function_call_api=False, + ), + ] +} + + +class AimlCredentials(ModelProviderCredentials): + """Credentials for Aiml.""" + + api_key: SecretStr = UserConfigurable(from_env="AIML_API_KEY") # type: ignore + api_base: Optional[SecretStr] = UserConfigurable( + default=None, from_env="AIML_API_BASE_URL" + ) + + def get_api_access_kwargs(self) -> dict[str, str]: + return { + k: v.get_secret_value() + for k, v in { + "api_key": self.api_key, + "base_url": self.api_base, + }.items() + if v is not None + } + + +class AimlSettings(ModelProviderSettings): + credentials: Optional[AimlCredentials] # type: ignore + budget: ModelProviderBudget # type: ignore + + +class AimlProvider(BaseOpenAIChatProvider[AimlModelName, AimlSettings]): + CHAT_MODELS = AIML_CHAT_MODELS + MODELS = CHAT_MODELS + + default_settings = AimlSettings( + name="aiml_provider", + description="Provides access to AIML's API.", + configuration=ModelProviderConfiguration(), + credentials=None, + budget=ModelProviderBudget(), + ) + + _settings: AimlSettings + _configuration: ModelProviderConfiguration + _credentials: AimlCredentials + _budget: ModelProviderBudget + + def __init__( + self, + settings: Optional[AimlSettings] = None, + logger: Optional[logging.Logger] = None, + ): + super(AimlProvider, self).__init__(settings=settings, logger=logger) + + from openai import AsyncOpenAI + + self._client = AsyncOpenAI( + **self._credentials.get_api_access_kwargs() # type: ignore + ) + + def get_tokenizer(self, model_name: AimlModelName) -> ModelTokenizer[Any]: + # HACK: No official tokenizer is available for AIML + return tiktoken.encoding_for_model("gpt-3.5-turbo") diff --git a/classic/forge/forge/llm/providers/multi.py b/classic/forge/forge/llm/providers/multi.py index e6accfff7906..5928b49696f2 100644 --- a/classic/forge/forge/llm/providers/multi.py +++ b/classic/forge/forge/llm/providers/multi.py @@ -5,6 +5,7 @@ from pydantic import ValidationError +from .aiml import AIML_CHAT_MODELS, AimlModelName, AimlProvider from .anthropic import ANTHROPIC_CHAT_MODELS, AnthropicModelName, AnthropicProvider from .groq import GROQ_CHAT_MODELS, GroqModelName, GroqProvider from .llamafile import LLAMAFILE_CHAT_MODELS, LlamafileModelName, LlamafileProvider @@ -25,10 +26,17 @@ _T = TypeVar("_T") -ModelName = AnthropicModelName | GroqModelName | LlamafileModelName | OpenAIModelName +ModelName = ( + AnthropicModelName + | AimlModelName + | GroqModelName + | LlamafileModelName + | OpenAIModelName +) EmbeddingModelProvider = OpenAIProvider CHAT_MODELS = { + **AIML_CHAT_MODELS, **ANTHROPIC_CHAT_MODELS, **GROQ_CHAT_MODELS, **LLAMAFILE_CHAT_MODELS, @@ -85,14 +93,16 @@ def get_tokenizer(self, model_name: ModelName) -> ModelTokenizer[Any]: def count_tokens(self, text: str, model_name: ModelName) -> int: return self.get_model_provider(model_name).count_tokens( - text=text, model_name=model_name # type: ignore + text=text, + model_name=model_name, # type: ignore ) def count_message_tokens( self, messages: ChatMessage | list[ChatMessage], model_name: ModelName ) -> int: return 
self.get_model_provider(model_name).count_message_tokens( - messages=messages, model_name=model_name # type: ignore + messages=messages, + model_name=model_name, # type: ignore ) async def create_chat_completion( @@ -168,7 +178,8 @@ def _get_provider(self, provider_name: ModelProviderName) -> ChatModelProvider: ) self._provider_instances[provider_name] = _provider = Provider( - settings=settings, logger=self._logger # type: ignore + settings=settings, + logger=self._logger, # type: ignore ) _provider._budget = self._budget # Object binding not preserved by Pydantic self._logger.debug(f"Initialized {Provider.__name__}!") @@ -181,6 +192,7 @@ def _get_provider_class( try: return { ModelProviderName.ANTHROPIC: AnthropicProvider, + ModelProviderName.AIML: AimlProvider, ModelProviderName.GROQ: GroqProvider, ModelProviderName.LLAMAFILE: LlamafileProvider, ModelProviderName.OPENAI: OpenAIProvider, @@ -194,6 +206,7 @@ def __repr__(self): ChatModelProvider = ( AnthropicProvider + | AimlProvider | GroqProvider | LlamafileProvider | OpenAIProvider diff --git a/classic/forge/forge/llm/providers/schema.py b/classic/forge/forge/llm/providers/schema.py index 2ca7b23e1de5..368acbee13b4 100644 --- a/classic/forge/forge/llm/providers/schema.py +++ b/classic/forge/forge/llm/providers/schema.py @@ -56,6 +56,7 @@ class ModelProviderName(str, enum.Enum): ANTHROPIC = "anthropic" GROQ = "groq" LLAMAFILE = "llamafile" + AIML = "aiml" class ChatMessage(BaseModel): @@ -301,20 +302,16 @@ def __init__( @abc.abstractmethod async def get_available_models( self, - ) -> Sequence["ChatModelInfo[_ModelName] | EmbeddingModelInfo[_ModelName]"]: - ... + ) -> Sequence["ChatModelInfo[_ModelName] | EmbeddingModelInfo[_ModelName]"]: ... @abc.abstractmethod - def count_tokens(self, text: str, model_name: _ModelName) -> int: - ... + def count_tokens(self, text: str, model_name: _ModelName) -> int: ... @abc.abstractmethod - def get_tokenizer(self, model_name: _ModelName) -> "ModelTokenizer[Any]": - ... + def get_tokenizer(self, model_name: _ModelName) -> "ModelTokenizer[Any]": ... @abc.abstractmethod - def get_token_limit(self, model_name: _ModelName) -> int: - ... + def get_token_limit(self, model_name: _ModelName) -> int: ... def get_incurred_cost(self) -> float: if self._budget: @@ -331,12 +328,10 @@ class ModelTokenizer(Protocol, Generic[_T]): """A ModelTokenizer provides tokenization specific to a model.""" @abc.abstractmethod - def encode(self, text: str) -> list[_T]: - ... + def encode(self, text: str) -> list[_T]: ... @abc.abstractmethod - def decode(self, tokens: list[_T]) -> str: - ... + def decode(self, tokens: list[_T]) -> str: ... #################### @@ -363,8 +358,7 @@ class BaseEmbeddingModelProvider(BaseModelProvider[_ModelName, _ModelProviderSet @abc.abstractmethod async def get_available_embedding_models( self, - ) -> Sequence[EmbeddingModelInfo[_ModelName]]: - ... + ) -> Sequence[EmbeddingModelInfo[_ModelName]]: ... @abc.abstractmethod async def create_embedding( @@ -373,8 +367,7 @@ async def create_embedding( model_name: _ModelName, embedding_parser: Callable[[Embedding], Embedding], **kwargs, - ) -> EmbeddingModelResponse: - ... + ) -> EmbeddingModelResponse: ... ############### @@ -399,16 +392,16 @@ class ChatModelResponse(ModelResponse, Generic[_T]): class BaseChatModelProvider(BaseModelProvider[_ModelName, _ModelProviderSettings]): @abc.abstractmethod - async def get_available_chat_models(self) -> Sequence[ChatModelInfo[_ModelName]]: - ... 
+ async def get_available_chat_models( + self, + ) -> Sequence[ChatModelInfo[_ModelName]]: ... @abc.abstractmethod def count_message_tokens( self, messages: ChatMessage | list[ChatMessage], model_name: _ModelName, - ) -> int: - ... + ) -> int: ... @abc.abstractmethod async def create_chat_completion( @@ -420,5 +413,4 @@ async def create_chat_completion( max_output_tokens: Optional[int] = None, prefill_response: str = "", **kwargs, - ) -> ChatModelResponse[_T]: - ... + ) -> ChatModelResponse[_T]: ... diff --git a/docs/content/classic/configuration/options.md b/docs/content/classic/configuration/options.md index 9fb18c03e7ab..7806bd59590d 100644 --- a/docs/content/classic/configuration/options.md +++ b/docs/content/classic/configuration/options.md @@ -7,6 +7,7 @@ You can set configuration variables via the `.env` file. If you don't have a `.e - `AUTHORISE_COMMAND_KEY`: Key response accepted when authorising commands. Default: y - `ANTHROPIC_API_KEY`: Set this if you want to use Anthropic models with AutoGPT +- `AIML_API_KEY`: Set this if you want to use AI/ML models with AutoGPT - `AZURE_CONFIG_FILE`: Location of the Azure Config file relative to the AutoGPT root directory. Default: azure.yaml - `COMPONENT_CONFIG_FILE`: Path to the component configuration file (json) for an agent. Optional - `DISABLED_COMMANDS`: Commands to disable. Use comma separated names of commands. See the list of commands from built-in components [here](../../forge/components/components.md). Default: None diff --git a/docs/content/classic/setup/index.md b/docs/content/classic/setup/index.md index 95f2e6f7a380..6206008b7b08 100644 --- a/docs/content/classic/setup/index.md +++ b/docs/content/classic/setup/index.md @@ -175,6 +175,19 @@ If you don't know which to choose, you can safely go with OpenAI*. [anthropic/models]: https://docs.anthropic.com/en/docs/models-overview +### AI/ML API +1. Make sure you have credits in your account: [Billing -> View Usage](https://aimlapi.com/app/billing?utm_source=autogpt&utm_medium=autogpt&utm_campaign=autogpt) +2. Get your AI/ML API key from [Key Management](https://aimlapi.com/app/keys?utm_source=autogpt&utm_medium=autogpt&utm_campaign=autogpt) +3. Open `.env` +4. Find the line that says `AIML_API_KEY=` +5. Insert your Aiml API Key directly after = without quotes or spaces: + ```ini + AIML_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + ``` +6. If you would like to see more of our [models](https://aimlapi.com/models?utm_source=autogpt&utm_medium=autogpt&utm_campaign=autogpt) here or have any questions, we would welcome your [feedback](https://discord.gg/j5QggeZJgY)! +***See you later! With love, your AI/ML team***❤️ + + ### Groq !!! 
note diff --git a/docs/content/index.md b/docs/content/index.md index 243b56ea4193..cd698443644b 100644 --- a/docs/content/index.md +++ b/docs/content/index.md @@ -66,6 +66,7 @@ The platform comes pre-integrated with cutting-edge LLM providers: - OpenAI - Anthropic +- AI/ML API - Groq - Llama From 0d6202c78c04caa5e5c9db583cf8836d9bda4dee Mon Sep 17 00:00:00 2001 From: Ivan Date: Thu, 2 Jan 2025 13:14:14 +0400 Subject: [PATCH 03/14] Fix imports at config.py, minor changes at readme --- classic/original_autogpt/autogpt/app/config.py | 1 + docs/content/classic/setup/index.md | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/classic/original_autogpt/autogpt/app/config.py b/classic/original_autogpt/autogpt/app/config.py index 0fa21eb07d58..2b49fb5af188 100644 --- a/classic/original_autogpt/autogpt/app/config.py +++ b/classic/original_autogpt/autogpt/app/config.py @@ -1,4 +1,5 @@ """Configuration class to store the state of bools for different scripts access.""" + from __future__ import annotations import logging diff --git a/docs/content/classic/setup/index.md b/docs/content/classic/setup/index.md index 6206008b7b08..015fa7ebd063 100644 --- a/docs/content/classic/setup/index.md +++ b/docs/content/classic/setup/index.md @@ -176,15 +176,15 @@ If you don't know which to choose, you can safely go with OpenAI*. ### AI/ML API -1. Make sure you have credits in your account: [Billing -> View Usage](https://aimlapi.com/app/billing?utm_source=autogpt&utm_medium=autogpt&utm_campaign=autogpt) -2. Get your AI/ML API key from [Key Management](https://aimlapi.com/app/keys?utm_source=autogpt&utm_medium=autogpt&utm_campaign=autogpt) +1. Make sure you have credits in your account: [Billing -> View Usage](https://aimlapi.com/app/billing?utm_source=autogpt&utm_medium=github&utm_campaign=integration) +2. Get your AI/ML API key from [Key Management](https://aimlapi.com/app/keys?utm_source=autogpt&utm_medium=github&utm_campaign=integration) 3. Open `.env` 4. Find the line that says `AIML_API_KEY=` -5. Insert your Aiml API Key directly after = without quotes or spaces: +5. Insert your AI/ML API Key directly after = without quotes or spaces: ```ini AIML_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ``` -6. If you would like to see more of our [models](https://aimlapi.com/models?utm_source=autogpt&utm_medium=autogpt&utm_campaign=autogpt) here or have any questions, we would welcome your [feedback](https://discord.gg/j5QggeZJgY)! +6. If you would like to see more of our [models](https://aimlapi.com/models?utm_source=autogpt&utm_medium=github&utm_campaign=integration) here or have any questions, we would welcome your [feedback](https://discord.gg/j5QggeZJgY)! ***See you later! 
With love, your AI/ML team***❤️ From 10fedd6e4675fe36191f00b88f103fef17b7fe1e Mon Sep 17 00:00:00 2001 From: Ivan Date: Thu, 2 Jan 2025 13:23:15 +0400 Subject: [PATCH 04/14] Fix --- autogpt_platform/backend/backend/data/credit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index 04a4154c5b13..b476f1f0d0b1 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ b/autogpt_platform/backend/backend/data/credit.py @@ -2,9 +2,9 @@ from datetime import datetime, timezone from prisma import Json -from prisma.enums import CreditTransactionType, UserBlockCreditType +from prisma.enums import CreditTransactionType from prisma.errors import UniqueViolationError -from prisma.models import CreditTransaction, UserBlockCredit +from prisma.models import CreditTransaction from backend.data.block import Block, BlockInput, get_block from backend.data.block_cost_config import BLOCK_COSTS From ff6fa8d68443153a2fdb747b267f4b78ff4c4dbb Mon Sep 17 00:00:00 2001 From: Ivan Date: Thu, 2 Jan 2025 13:51:10 +0400 Subject: [PATCH 05/14] Fix code style issues --- .../backend/backend/blocks/llm.py | 8 ++--- .../frontend/src/app/profile/page.tsx | 30 +++++++++---------- .../app/store/(user)/integrations/page.tsx | 30 +++++++++---------- .../integrations/credentials-input.tsx | 17 ++++++----- .../src/lib/autogpt-server-api/types.ts | 16 +++++----- 5 files changed, 51 insertions(+), 50 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index 56033f10f85c..285588a25c02 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -457,7 +457,6 @@ def llm_call( max_tokens=max_tokens, ) - # response = completion.choices[0].message.content return ( completion.choices[0].message.content or "", completion.usage.prompt_tokens if completion.usage else 0, @@ -828,7 +827,9 @@ def _combine_summaries( chunk_overlap=input_data.chunk_overlap, ), credentials=credentials, - ).send(None)[1] # Get the first yielded value + ).send(None)[ + 1 + ] # Get the first yielded value class AIConversationBlock(Block): @@ -885,8 +886,7 @@ def __init__(self): "The 2020 World Series was played at Globe Life Field in Arlington, Texas.", ), test_mock={ - "llm_call": lambda *args, - **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." + "llm_call": lambda *args, **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." }, ) diff --git a/autogpt_platform/frontend/src/app/profile/page.tsx b/autogpt_platform/frontend/src/app/profile/page.tsx index add79ed2eb40..dbd29ffad213 100644 --- a/autogpt_platform/frontend/src/app/profile/page.tsx +++ b/autogpt_platform/frontend/src/app/profile/page.tsx @@ -38,11 +38,11 @@ export default function PrivatePage() { const [confirmationDialogState, setConfirmationDialogState] = useState< | { - open: true; - message: string; - onConfirm: () => void; - onReject: () => void; - } + open: true; + message: string; + onConfirm: () => void; + onReject: () => void; + } | { open: false } >({ open: false }); @@ -124,16 +124,16 @@ export default function PrivatePage() { const allCredentials = providers ? 
Object.values(providers).flatMap((provider) => - [...provider.savedOAuthCredentials, ...provider.savedApiKeys] - .filter((cred) => !hiddenCredentials.includes(cred.id)) - .map((credentials) => ({ - ...credentials, - provider: provider.provider, - providerName: provider.providerName, - ProviderIcon: providerIcons[provider.provider], - TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], - })), - ) + [...provider.savedOAuthCredentials, ...provider.savedApiKeys] + .filter((cred) => !hiddenCredentials.includes(cred.id)) + .map((credentials) => ({ + ...credentials, + provider: provider.provider, + providerName: provider.providerName, + ProviderIcon: providerIcons[provider.provider], + TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], + })), + ) : []; return ( diff --git a/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx b/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx index 3ffbcdd070d7..44b699b4bf56 100644 --- a/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx +++ b/autogpt_platform/frontend/src/app/store/(user)/integrations/page.tsx @@ -38,11 +38,11 @@ export default function PrivatePage() { const [confirmationDialogState, setConfirmationDialogState] = useState< | { - open: true; - message: string; - onConfirm: () => void; - onReject: () => void; - } + open: true; + message: string; + onConfirm: () => void; + onReject: () => void; + } | { open: false } >({ open: false }); @@ -124,16 +124,16 @@ export default function PrivatePage() { const allCredentials = providers ? Object.values(providers).flatMap((provider) => - [...provider.savedOAuthCredentials, ...provider.savedApiKeys] - .filter((cred) => !hiddenCredentials.includes(cred.id)) - .map((credentials) => ({ - ...credentials, - provider: provider.provider, - providerName: provider.providerName, - ProviderIcon: providerIcons[provider.provider], - TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], - })), - ) + [...provider.savedOAuthCredentials, ...provider.savedApiKeys] + .filter((cred) => !hiddenCredentials.includes(cred.id)) + .map((credentials) => ({ + ...credentials, + provider: provider.provider, + providerName: provider.providerName, + ProviderIcon: providerIcons[provider.provider], + TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], + })), + ) : []; return ( diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx index dd3c8fe6cdd6..a055fefbde49 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx @@ -77,14 +77,14 @@ export const providerIcons: Record< export type OAuthPopupResultMessage = { message_type: "oauth_popup_result" } & ( | { - success: true; - code: string; - state: string; - } + success: true; + code: string; + state: string; + } | { - success: false; - message: string; - } + success: false; + message: string; + } ); export const CredentialsInput: FC<{ @@ -178,7 +178,8 @@ export const CredentialsInput: FC<{ console.error("Error in OAuth callback:", error); setOAuthError( // type of error is unkown so we need to use String(error) - `Error in OAuth callback: ${error instanceof Error ? error.message : String(error) + `Error in OAuth callback: ${ + error instanceof Error ? 
error.message : String(error) }`, ); } finally { diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 09e31a70f381..fc4adcdeb5be 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -150,16 +150,16 @@ export type BlockIONullSubSchema = BlockIOSubSchemaMeta & { type BlockIOCombinedTypeSubSchema = BlockIOSubSchemaMeta & ( | { - allOf: [BlockIOSimpleTypeSubSchema]; - } + allOf: [BlockIOSimpleTypeSubSchema]; + } | { - anyOf: BlockIOSimpleTypeSubSchema[]; - default?: string | number | boolean | null; - } + anyOf: BlockIOSimpleTypeSubSchema[]; + default?: string | number | boolean | null; + } | { - oneOf: BlockIOSimpleTypeSubSchema[]; - default?: string | number | boolean | null; - } + oneOf: BlockIOSimpleTypeSubSchema[]; + default?: string | number | boolean | null; + } ); /* Mirror of backend/data/graph.py:Node */ From 142671e53e4b5794900c379b79578134ffe47c62 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Tue, 7 Jan 2025 10:39:06 +0000 Subject: [PATCH 06/14] Update autogpt_platform/backend/backend/blocks/llm.py Co-authored-by: Reinier van der Leer --- autogpt_platform/backend/backend/blocks/llm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index 285588a25c02..d97f7b89fec4 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -99,7 +99,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta): # Anthropic models CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest" CLAUDE_3_HAIKU = "claude-3-haiku-20240307" - # Aiml models + # AI/ML API models AIML_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo" AIML_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct" AIML_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo" From 996f8d433c36148abb5908fded5a96eb603c8fe1 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Tue, 7 Jan 2025 10:39:13 +0000 Subject: [PATCH 07/14] Update autogpt_platform/backend/backend/blocks/llm.py Co-authored-by: Reinier van der Leer --- autogpt_platform/backend/backend/blocks/llm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index d97f7b89fec4..c85ec718ad33 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -35,7 +35,7 @@ ProviderName.OLLAMA, ProviderName.OPENAI, ProviderName.OPEN_ROUTER, - ProviderName.AIML, + ProviderName.AIML_API, ] AICredentials = CredentialsMetaInput[LLMProviderName, Literal["api_key"]] From 8ea70812b3119bff568d120ef293308e67a8eb49 Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Tue, 7 Jan 2025 10:39:33 +0000 Subject: [PATCH 08/14] Update autogpt_platform/backend/backend/integrations/credentials_store.py Co-authored-by: Reinier van der Leer --- .../backend/backend/integrations/credentials_store.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py index 1ae2d20e4cc7..2f149126d32b 100644 --- 
a/autogpt_platform/backend/backend/integrations/credentials_store.py +++ b/autogpt_platform/backend/backend/integrations/credentials_store.py @@ -53,7 +53,7 @@ id="aad82a89-9794-4ebb-977f-d736aa5260a3", provider="aiml", api_key=SecretStr(settings.secrets.aiml_api_key), - title="Use Credits for AI/ML", + title="Use Credits for AI/ML API", expires_at=None, ) anthropic_credentials = APIKeyCredentials( From d1644862f75b88cd49592002d02e5edb5410860b Mon Sep 17 00:00:00 2001 From: Ivan Date: Wed, 8 Jan 2025 15:19:31 +0400 Subject: [PATCH 09/14] Minor changes --- .../backend/backend/blocks/llm.py | 11 +- .../backend/integrations/credentials_store.py | 2 +- .../backend/backend/integrations/providers.py | 2 +- classic/forge/forge/llm/providers/aiml.py | 135 ------------------ classic/forge/forge/llm/providers/multi.py | 21 +-- classic/forge/forge/llm/providers/schema.py | 36 +++-- 6 files changed, 33 insertions(+), 174 deletions(-) delete mode 100644 classic/forge/forge/llm/providers/aiml.py diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index 285588a25c02..68fddb7ff776 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -35,7 +35,7 @@ ProviderName.OLLAMA, ProviderName.OPENAI, ProviderName.OPEN_ROUTER, - ProviderName.AIML, + ProviderName.AIML_API, ] AICredentials = CredentialsMetaInput[LLMProviderName, Literal["api_key"]] @@ -99,7 +99,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta): # Anthropic models CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest" CLAUDE_3_HAIKU = "claude-3-haiku-20240307" - # Aiml models + # AIML_API models AIML_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo" AIML_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct" AIML_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo" @@ -827,9 +827,7 @@ def _combine_summaries( chunk_overlap=input_data.chunk_overlap, ), credentials=credentials, - ).send(None)[ - 1 - ] # Get the first yielded value + ).send(None)[1] # Get the first yielded value class AIConversationBlock(Block): @@ -886,7 +884,8 @@ def __init__(self): "The 2020 World Series was played at Globe Life Field in Arlington, Texas.", ), test_mock={ - "llm_call": lambda *args, **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." + "llm_call": lambda *args, + **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." 
}, ) diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py index 1ae2d20e4cc7..2f149126d32b 100644 --- a/autogpt_platform/backend/backend/integrations/credentials_store.py +++ b/autogpt_platform/backend/backend/integrations/credentials_store.py @@ -53,7 +53,7 @@ id="aad82a89-9794-4ebb-977f-d736aa5260a3", provider="aiml", api_key=SecretStr(settings.secrets.aiml_api_key), - title="Use Credits for AI/ML", + title="Use Credits for AI/ML API", expires_at=None, ) anthropic_credentials = APIKeyCredentials( diff --git a/autogpt_platform/backend/backend/integrations/providers.py b/autogpt_platform/backend/backend/integrations/providers.py index b6c0783ca23b..898662b5dc08 100644 --- a/autogpt_platform/backend/backend/integrations/providers.py +++ b/autogpt_platform/backend/backend/integrations/providers.py @@ -4,7 +4,7 @@ # --8<-- [start:ProviderName] class ProviderName(str, Enum): ANTHROPIC = "anthropic" - AIML = "aiml" + AIML_API = "aiml" COMPASS = "compass" DISCORD = "discord" D_ID = "d_id" diff --git a/classic/forge/forge/llm/providers/aiml.py b/classic/forge/forge/llm/providers/aiml.py deleted file mode 100644 index f6052837f420..000000000000 --- a/classic/forge/forge/llm/providers/aiml.py +++ /dev/null @@ -1,135 +0,0 @@ -from __future__ import annotations - -import enum -import logging -from typing import Any, Optional - -import tiktoken -from pydantic import SecretStr - -from forge.models.config import UserConfigurable - -from ._openai_base import BaseOpenAIChatProvider -from .schema import ( - ChatModelInfo, - ModelProviderBudget, - ModelProviderConfiguration, - ModelProviderCredentials, - ModelProviderName, - ModelProviderSettings, - ModelTokenizer, -) - - -class AimlModelName(str, enum.Enum): - AIML_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo" - AIML_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct" - AIML_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo" - AIML_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo" - AIML_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" - - -AIML_CHAT_MODELS = { - info.name: info - for info in [ - ChatModelInfo( - name=AimlModelName.AIML_QWEN2_5_72B, - provider_name=ModelProviderName.AIML, - prompt_token_cost=1.26 / 1e6, - completion_token_cost=1.26 / 1e6, - max_tokens=32000, - has_function_call_api=False, - ), - ChatModelInfo( - name=AimlModelName.AIML_LLAMA3_1_70B, - provider_name=ModelProviderName.AIML, - prompt_token_cost=0.368 / 1e6, - completion_token_cost=0.42 / 1e6, - max_tokens=128000, - has_function_call_api=False, - ), - ChatModelInfo( - name=AimlModelName.AIML_LLAMA3_3_70B, - provider_name=ModelProviderName.AIML, - prompt_token_cost=0.924 / 1e6, - completion_token_cost=0.924 / 1e6, - max_tokens=128000, - has_function_call_api=False, - ), - ChatModelInfo( - name=AimlModelName.AIML_META_LLAMA_3_1_70B, - provider_name=ModelProviderName.AIML, - prompt_token_cost=0.063 / 1e6, - completion_token_cost=0.063 / 1e6, - max_tokens=131000, - has_function_call_api=False, - ), - ChatModelInfo( - name=AimlModelName.AIML_LLAMA_3_2_3B, - provider_name=ModelProviderName.AIML, - prompt_token_cost=0.924 / 1e6, - completion_token_cost=0.924 / 1e6, - max_tokens=128000, - has_function_call_api=False, - ), - ] -} - - -class AimlCredentials(ModelProviderCredentials): - """Credentials for Aiml.""" - - api_key: SecretStr = UserConfigurable(from_env="AIML_API_KEY") # type: ignore - api_base: Optional[SecretStr] = UserConfigurable( - 
default=None, from_env="AIML_API_BASE_URL" - ) - - def get_api_access_kwargs(self) -> dict[str, str]: - return { - k: v.get_secret_value() - for k, v in { - "api_key": self.api_key, - "base_url": self.api_base, - }.items() - if v is not None - } - - -class AimlSettings(ModelProviderSettings): - credentials: Optional[AimlCredentials] # type: ignore - budget: ModelProviderBudget # type: ignore - - -class AimlProvider(BaseOpenAIChatProvider[AimlModelName, AimlSettings]): - CHAT_MODELS = AIML_CHAT_MODELS - MODELS = CHAT_MODELS - - default_settings = AimlSettings( - name="aiml_provider", - description="Provides access to AIML's API.", - configuration=ModelProviderConfiguration(), - credentials=None, - budget=ModelProviderBudget(), - ) - - _settings: AimlSettings - _configuration: ModelProviderConfiguration - _credentials: AimlCredentials - _budget: ModelProviderBudget - - def __init__( - self, - settings: Optional[AimlSettings] = None, - logger: Optional[logging.Logger] = None, - ): - super(AimlProvider, self).__init__(settings=settings, logger=logger) - - from openai import AsyncOpenAI - - self._client = AsyncOpenAI( - **self._credentials.get_api_access_kwargs() # type: ignore - ) - - def get_tokenizer(self, model_name: AimlModelName) -> ModelTokenizer[Any]: - # HACK: No official tokenizer is available for AIML - return tiktoken.encoding_for_model("gpt-3.5-turbo") diff --git a/classic/forge/forge/llm/providers/multi.py b/classic/forge/forge/llm/providers/multi.py index 5928b49696f2..e6accfff7906 100644 --- a/classic/forge/forge/llm/providers/multi.py +++ b/classic/forge/forge/llm/providers/multi.py @@ -5,7 +5,6 @@ from pydantic import ValidationError -from .aiml import AIML_CHAT_MODELS, AimlModelName, AimlProvider from .anthropic import ANTHROPIC_CHAT_MODELS, AnthropicModelName, AnthropicProvider from .groq import GROQ_CHAT_MODELS, GroqModelName, GroqProvider from .llamafile import LLAMAFILE_CHAT_MODELS, LlamafileModelName, LlamafileProvider @@ -26,17 +25,10 @@ _T = TypeVar("_T") -ModelName = ( - AnthropicModelName - | AimlModelName - | GroqModelName - | LlamafileModelName - | OpenAIModelName -) +ModelName = AnthropicModelName | GroqModelName | LlamafileModelName | OpenAIModelName EmbeddingModelProvider = OpenAIProvider CHAT_MODELS = { - **AIML_CHAT_MODELS, **ANTHROPIC_CHAT_MODELS, **GROQ_CHAT_MODELS, **LLAMAFILE_CHAT_MODELS, @@ -93,16 +85,14 @@ def get_tokenizer(self, model_name: ModelName) -> ModelTokenizer[Any]: def count_tokens(self, text: str, model_name: ModelName) -> int: return self.get_model_provider(model_name).count_tokens( - text=text, - model_name=model_name, # type: ignore + text=text, model_name=model_name # type: ignore ) def count_message_tokens( self, messages: ChatMessage | list[ChatMessage], model_name: ModelName ) -> int: return self.get_model_provider(model_name).count_message_tokens( - messages=messages, - model_name=model_name, # type: ignore + messages=messages, model_name=model_name # type: ignore ) async def create_chat_completion( @@ -178,8 +168,7 @@ def _get_provider(self, provider_name: ModelProviderName) -> ChatModelProvider: ) self._provider_instances[provider_name] = _provider = Provider( - settings=settings, - logger=self._logger, # type: ignore + settings=settings, logger=self._logger # type: ignore ) _provider._budget = self._budget # Object binding not preserved by Pydantic self._logger.debug(f"Initialized {Provider.__name__}!") @@ -192,7 +181,6 @@ def _get_provider_class( try: return { ModelProviderName.ANTHROPIC: AnthropicProvider, - 
ModelProviderName.AIML: AimlProvider, ModelProviderName.GROQ: GroqProvider, ModelProviderName.LLAMAFILE: LlamafileProvider, ModelProviderName.OPENAI: OpenAIProvider, @@ -206,7 +194,6 @@ def __repr__(self): ChatModelProvider = ( AnthropicProvider - | AimlProvider | GroqProvider | LlamafileProvider | OpenAIProvider diff --git a/classic/forge/forge/llm/providers/schema.py b/classic/forge/forge/llm/providers/schema.py index 368acbee13b4..2ca7b23e1de5 100644 --- a/classic/forge/forge/llm/providers/schema.py +++ b/classic/forge/forge/llm/providers/schema.py @@ -56,7 +56,6 @@ class ModelProviderName(str, enum.Enum): ANTHROPIC = "anthropic" GROQ = "groq" LLAMAFILE = "llamafile" - AIML = "aiml" class ChatMessage(BaseModel): @@ -302,16 +301,20 @@ def __init__( @abc.abstractmethod async def get_available_models( self, - ) -> Sequence["ChatModelInfo[_ModelName] | EmbeddingModelInfo[_ModelName]"]: ... + ) -> Sequence["ChatModelInfo[_ModelName] | EmbeddingModelInfo[_ModelName]"]: + ... @abc.abstractmethod - def count_tokens(self, text: str, model_name: _ModelName) -> int: ... + def count_tokens(self, text: str, model_name: _ModelName) -> int: + ... @abc.abstractmethod - def get_tokenizer(self, model_name: _ModelName) -> "ModelTokenizer[Any]": ... + def get_tokenizer(self, model_name: _ModelName) -> "ModelTokenizer[Any]": + ... @abc.abstractmethod - def get_token_limit(self, model_name: _ModelName) -> int: ... + def get_token_limit(self, model_name: _ModelName) -> int: + ... def get_incurred_cost(self) -> float: if self._budget: @@ -328,10 +331,12 @@ class ModelTokenizer(Protocol, Generic[_T]): """A ModelTokenizer provides tokenization specific to a model.""" @abc.abstractmethod - def encode(self, text: str) -> list[_T]: ... + def encode(self, text: str) -> list[_T]: + ... @abc.abstractmethod - def decode(self, tokens: list[_T]) -> str: ... + def decode(self, tokens: list[_T]) -> str: + ... #################### @@ -358,7 +363,8 @@ class BaseEmbeddingModelProvider(BaseModelProvider[_ModelName, _ModelProviderSet @abc.abstractmethod async def get_available_embedding_models( self, - ) -> Sequence[EmbeddingModelInfo[_ModelName]]: ... + ) -> Sequence[EmbeddingModelInfo[_ModelName]]: + ... @abc.abstractmethod async def create_embedding( @@ -367,7 +373,8 @@ async def create_embedding( model_name: _ModelName, embedding_parser: Callable[[Embedding], Embedding], **kwargs, - ) -> EmbeddingModelResponse: ... + ) -> EmbeddingModelResponse: + ... ############### @@ -392,16 +399,16 @@ class ChatModelResponse(ModelResponse, Generic[_T]): class BaseChatModelProvider(BaseModelProvider[_ModelName, _ModelProviderSettings]): @abc.abstractmethod - async def get_available_chat_models( - self, - ) -> Sequence[ChatModelInfo[_ModelName]]: ... + async def get_available_chat_models(self) -> Sequence[ChatModelInfo[_ModelName]]: + ... @abc.abstractmethod def count_message_tokens( self, messages: ChatMessage | list[ChatMessage], model_name: _ModelName, - ) -> int: ... + ) -> int: + ... @abc.abstractmethod async def create_chat_completion( @@ -413,4 +420,5 @@ async def create_chat_completion( max_output_tokens: Optional[int] = None, prefill_response: str = "", **kwargs, - ) -> ChatModelResponse[_T]: ... + ) -> ChatModelResponse[_T]: + ... 
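After patch 09 the classic/forge provider is removed again, so the AI/ML API is reached only through the OpenAI-compatible branch that patch 02 added to `backend/blocks/llm.py`. A minimal standalone sketch of that call path, assuming the key is read straight from an `AIML_API_KEY` environment variable (in the platform it actually flows in via `settings.secrets.aiml_api_key` and the credentials store) and using one of the model IDs registered in the `LlmModel` enum; the `base_url` is the one hard-coded in the patch:

```python
# Sketch of the aiml branch in llm_call(): the AI/ML API speaks the OpenAI
# wire protocol, so the stock openai client works with a swapped base_url.
import os

import openai

client = openai.OpenAI(
    base_url="https://api.aimlapi.com/v2",  # endpoint hard-coded in llm.py
    api_key=os.environ["AIML_API_KEY"],  # assumption: key taken from the env
)

completion = client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo",  # one of the AIML LlmModel values
    messages=[{"role": "user", "content": "Where was the 2020 World Series played?"}],
    max_tokens=256,
)

# Mirror the (content, prompt_tokens, completion_tokens) tuple the block returns.
print(completion.choices[0].message.content or "")
if completion.usage:
    print(completion.usage.prompt_tokens, completion.usage.completion_tokens)
```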
From 41414a723feacd19bf2d815e3fef679059eb50a0 Mon Sep 17 00:00:00 2001 From: Ivan Date: Wed, 8 Jan 2025 15:29:31 +0400 Subject: [PATCH 10/14] Lint fixes --- autogpt_platform/backend/backend/blocks/llm.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index 6c669d2b7ddb..c85ec718ad33 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -827,7 +827,9 @@ def _combine_summaries( chunk_overlap=input_data.chunk_overlap, ), credentials=credentials, - ).send(None)[1] # Get the first yielded value + ).send(None)[ + 1 + ] # Get the first yielded value class AIConversationBlock(Block): @@ -884,8 +886,7 @@ def __init__(self): "The 2020 World Series was played at Globe Life Field in Arlington, Texas.", ), test_mock={ - "llm_call": lambda *args, - **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." + "llm_call": lambda *args, **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." }, ) From 847ffe5f4f46ce60caaeee5c001cd75a8c089a92 Mon Sep 17 00:00:00 2001 From: Ivan Date: Fri, 10 Jan 2025 13:22:39 +0400 Subject: [PATCH 11/14] Minor changes --- .../backend/backend/blocks/llm.py | 29 ++++++++--------- .../backend/backend/data/block_cost_config.py | 20 ++++++------ .../backend/integrations/credentials_store.py | 8 ++--- .../backend/backend/integrations/providers.py | 2 +- .../backend/backend/util/settings.py | 2 +- .../frontend/src/app/profile/page.tsx | 32 +++++++++---------- .../integrations/credentials-input.tsx | 19 ++++++----- .../integrations/credentials-provider.tsx | 2 +- .../src/lib/autogpt-server-api/types.ts | 18 +++++------ 9 files changed, 65 insertions(+), 67 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index c85ec718ad33..f30b44b01cd6 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -100,11 +100,11 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta): CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest" CLAUDE_3_HAIKU = "claude-3-haiku-20240307" # AI/ML API models - AIML_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo" - AIML_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct" - AIML_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo" - AIML_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" - AIML_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo" + AIML_API_QWEN2_5_72B = "Qwen/Qwen2.5-72B-Instruct-Turbo" + AIML_API_LLAMA3_1_70B = "nvidia/llama-3.1-nemotron-70b-instruct" + AIML_API_LLAMA3_3_70B = "meta-llama/Llama-3.3-70B-Instruct-Turbo" + AIML_API_META_LLAMA_3_1_70B = "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo" + AIML_API_LLAMA_3_2_3B = "meta-llama/Llama-3.2-3B-Instruct-Turbo" # Groq models LLAMA3_8B = "llama3-8b-8192" LLAMA3_70B = "llama3-70b-8192" @@ -161,11 +161,11 @@ def context_window(self) -> int: LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385), LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000), LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000), - LlmModel.AIML_QWEN2_5_72B: ModelMetadata("aiml", 32000), - LlmModel.AIML_LLAMA3_1_70B: ModelMetadata("aiml", 128000), - LlmModel.AIML_LLAMA3_3_70B: ModelMetadata("aiml", 128000), - LlmModel.AIML_META_LLAMA_3_1_70B: ModelMetadata("aiml", 131000), - LlmModel.AIML_LLAMA_3_2_3B: ModelMetadata("aiml", 
+ LlmModel.AIML_API_QWEN2_5_72B: ModelMetadata("aiml_api", 32000), + LlmModel.AIML_API_LLAMA3_1_70B: ModelMetadata("aiml_api", 128000), + LlmModel.AIML_API_LLAMA3_3_70B: ModelMetadata("aiml_api", 128000), + LlmModel.AIML_API_META_LLAMA_3_1_70B: ModelMetadata("aiml_api", 131000), + LlmModel.AIML_API_LLAMA_3_2_3B: ModelMetadata("aiml_api", 128000), LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192), LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192), LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768), @@ -445,7 +445,7 @@ def llm_call( response.usage.prompt_tokens if response.usage else 0, response.usage.completion_tokens if response.usage else 0, ) - elif provider == "aiml": + elif provider == "aiml_api": client = openai.OpenAI( base_url="https://api.aimlapi.com/v2", api_key=credentials.api_key.get_secret_value(), ) @@ -827,9 +827,7 @@ def _combine_summaries( chunk_overlap=input_data.chunk_overlap, ), credentials=credentials, - ).send(None)[ - 1 - ] # Get the first yielded value + ).send(None)[1] # Get the first yielded value class AIConversationBlock(Block): @@ -886,7 +884,8 @@ def __init__(self): "The 2020 World Series was played at Globe Life Field in Arlington, Texas.", ), test_mock={ - "llm_call": lambda *args, **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." + "llm_call": lambda *args, + **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." }, ) diff --git a/autogpt_platform/backend/backend/data/block_cost_config.py b/autogpt_platform/backend/backend/data/block_cost_config.py index ec48d5d3c023..728227025ebd 100644 --- a/autogpt_platform/backend/backend/data/block_cost_config.py +++ b/autogpt_platform/backend/backend/data/block_cost_config.py @@ -20,7 +20,7 @@ from backend.data.block import Block from backend.data.cost import BlockCost, BlockCostType from backend.integrations.credentials_store import ( - aiml_credentials, + aiml_api_credentials, anthropic_credentials, did_credentials, groq_credentials, @@ -44,11 +44,11 @@ LlmModel.GPT3_5_TURBO: 1, LlmModel.CLAUDE_3_5_SONNET: 4, LlmModel.CLAUDE_3_HAIKU: 1, - LlmModel.AIML_QWEN2_5_72B: 1, - LlmModel.AIML_LLAMA3_1_70B: 1, - LlmModel.AIML_LLAMA3_3_70B: 1, - LlmModel.AIML_META_LLAMA_3_1_70B: 1, - LlmModel.AIML_LLAMA_3_2_3B: 1, + LlmModel.AIML_API_QWEN2_5_72B: 1, + LlmModel.AIML_API_LLAMA3_1_70B: 1, + LlmModel.AIML_API_LLAMA3_3_70B: 1, + LlmModel.AIML_API_META_LLAMA_3_1_70B: 1, + LlmModel.AIML_API_LLAMA_3_2_3B: 1, LlmModel.LLAMA3_8B: 1, LlmModel.LLAMA3_70B: 1, LlmModel.MIXTRAL_8X7B: 1, @@ -155,15 +155,15 @@ cost_filter={ "model": model, "credentials": { - "id": aiml_credentials.id, - "provider": aiml_credentials.provider, - "type": aiml_credentials.type, + "id": aiml_api_credentials.id, + "provider": aiml_api_credentials.provider, + "type": aiml_api_credentials.type, }, }, cost_amount=cost, ) for model, cost in MODEL_COST.items() - if MODEL_METADATA[model].provider == "aiml" + if MODEL_METADATA[model].provider == "aiml_api" ] ) diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py index 2f149126d32b..097cf7147965 100644 --- a/autogpt_platform/backend/backend/integrations/credentials_store.py +++ b/autogpt_platform/backend/backend/integrations/credentials_store.py @@ -49,9 +49,9 @@ title="Use Credits for OpenAI", expires_at=None, ) -aiml_credentials = APIKeyCredentials( +aiml_api_credentials = APIKeyCredentials( id="aad82a89-9794-4ebb-977f-d736aa5260a3", - provider="aiml", + provider="aiml_api",
api_key=SecretStr(settings.secrets.aiml_api_key), title="Use Credits for AI/ML API", expires_at=None, @@ -105,7 +105,7 @@ ideogram_credentials, replicate_credentials, openai_credentials, - aiml_credentials, + aiml_api_credentials, anthropic_credentials, groq_credentials, did_credentials, @@ -154,7 +154,7 @@ def get_all_creds(self, user_id: str) -> list[Credentials]: if settings.secrets.openai_api_key: all_credentials.append(openai_credentials) if settings.secrets.aiml_api_key: - all_credentials.append(aiml_credentials) + all_credentials.append(aiml_api_credentials) if settings.secrets.anthropic_api_key: all_credentials.append(anthropic_credentials) if settings.secrets.did_api_key: diff --git a/autogpt_platform/backend/backend/integrations/providers.py b/autogpt_platform/backend/backend/integrations/providers.py index 898662b5dc08..36642cc9bab4 100644 --- a/autogpt_platform/backend/backend/integrations/providers.py +++ b/autogpt_platform/backend/backend/integrations/providers.py @@ -4,7 +4,7 @@ # --8<-- [start:ProviderName] class ProviderName(str, Enum): ANTHROPIC = "anthropic" - AIML_API = "aiml" + AIML_API = "aiml_api" COMPASS = "compass" DISCORD = "discord" D_ID = "d_id" diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index cf07207c621c..def6427d4aec 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -266,7 +266,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings): ) openai_api_key: str = Field(default="", description="OpenAI API key") - aiml_api_key: str = Field(default="", description="AI/ML API key") + aiml_api_key: str = Field(default="", description="'AI/ML API' key") anthropic_api_key: str = Field(default="", description="Anthropic API key") groq_api_key: str = Field(default="", description="Groq API key") open_router_api_key: str = Field(default="", description="Open Router API Key") diff --git a/autogpt_platform/frontend/src/app/profile/page.tsx b/autogpt_platform/frontend/src/app/profile/page.tsx index dbd29ffad213..b6f63f7bea74 100644 --- a/autogpt_platform/frontend/src/app/profile/page.tsx +++ b/autogpt_platform/frontend/src/app/profile/page.tsx @@ -38,11 +38,11 @@ export default function PrivatePage() { const [confirmationDialogState, setConfirmationDialogState] = useState< | { - open: true; - message: string; - onConfirm: () => void; - onReject: () => void; - } + open: true; + message: string; + onConfirm: () => void; + onReject: () => void; + } | { open: false } >({ open: false }); @@ -103,7 +103,7 @@ export default function PrivatePage() { "6b9fc200-4726-4973-86c9-cd526f5ce5db", // Replicate "53c25cb8-e3ee-465c-a4d1-e75a4c899c2a", // OpenAI "24e5d942-d9e3-4798-8151-90143ee55629", // Anthropic - "aad82a89-9794-4ebb-977f-d736aa5260a3", // AI/ML + "aad82a89-9794-4ebb-977f-d736aa5260a3", // AI/ML API "4ec22295-8f97-4dd1-b42b-2c6957a02545", // Groq "7f7b0654-c36b-4565-8fa7-9a52575dfae2", // D-ID "7f26de70-ba0d-494e-ba76-238e65e7b45f", // Jina @@ -124,16 +124,16 @@ export default function PrivatePage() { const allCredentials = providers ?
Object.values(providers).flatMap((provider) => - [...provider.savedOAuthCredentials, ...provider.savedApiKeys] - .filter((cred) => !hiddenCredentials.includes(cred.id)) - .map((credentials) => ({ - ...credentials, - provider: provider.provider, - providerName: provider.providerName, - ProviderIcon: providerIcons[provider.provider], - TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], - })), - ) + [...provider.savedOAuthCredentials, ...provider.savedApiKeys] + .filter((cred) => !hiddenCredentials.includes(cred.id)) + .map((credentials) => ({ + ...credentials, + provider: provider.provider, + providerName: provider.providerName, + ProviderIcon: providerIcons[provider.provider], + TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], + })), + ) : []; return ( diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx index a055fefbde49..e1686c4c2d8d 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx @@ -52,7 +52,7 @@ export const providerIcons: Record< github: FaGithub, google: FaGoogle, groq: fallbackIcon, - aiml: fallbackIcon, + aiml_api: fallbackIcon, notion: NotionLogoIcon, discord: FaDiscord, d_id: fallbackIcon, @@ -77,14 +77,14 @@ export const providerIcons: Record< export type OAuthPopupResultMessage = { message_type: "oauth_popup_result" } & ( | { - success: true; - code: string; - state: string; - } + success: true; + code: string; + state: string; + } | { - success: false; - message: string; - } + success: false; + message: string; + } ); export const CredentialsInput: FC<{ @@ -178,8 +178,7 @@ export const CredentialsInput: FC<{ console.error("Error in OAuth callback:", error); setOAuthError( // type of error is unknown so we need to use String(error) - `Error in OAuth callback: ${ - error instanceof Error ? error.message : String(error) + `Error in OAuth callback: ${error instanceof Error ?
error.message : String(error) }`, ); } finally { diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx index b1b3d0f882cc..780c1138fc0e 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx @@ -16,7 +16,7 @@ const CREDENTIALS_PROVIDER_NAMES = Object.values( // --8<-- [start:CredentialsProviderNames] const providerDisplayNames: Record = { - aiml: "AI/ML", + aiml_api: "AI/ML", anthropic: "Anthropic", discord: "Discord", d_id: "D-ID", diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index fc4adcdeb5be..ae2a2861555d 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -102,7 +102,7 @@ export type CredentialsType = "api_key" | "oauth2"; // --8<-- [start:BlockIOCredentialsSubSchema] export const PROVIDER_NAMES = { ANTHROPIC: "anthropic", - AIML: "aiml", + AIML_API: "aiml_api", D_ID: "d_id", DISCORD: "discord", E2B: "e2b", @@ -150,16 +150,16 @@ export type BlockIONullSubSchema = BlockIOSubSchemaMeta & { type BlockIOCombinedTypeSubSchema = BlockIOSubSchemaMeta & ( | { - allOf: [BlockIOSimpleTypeSubSchema]; - } + allOf: [BlockIOSimpleTypeSubSchema]; + } | { - anyOf: BlockIOSimpleTypeSubSchema[]; - default?: string | number | boolean | null; - } + anyOf: BlockIOSimpleTypeSubSchema[]; + default?: string | number | boolean | null; + } | { - oneOf: BlockIOSimpleTypeSubSchema[]; - default?: string | number | boolean | null; - } + oneOf: BlockIOSimpleTypeSubSchema[]; + default?: string | number | boolean | null; + } ); /* Mirror of backend/data/graph.py:Node */ From fda0ad18ef1c9f8a10254f5071ef9e5eda9add0c Mon Sep 17 00:00:00 2001 From: Ivan Date: Fri, 10 Jan 2025 13:40:27 +0400 Subject: [PATCH 12/14] Minor changes --- classic/original_autogpt/autogpt/app/config.py | 1 - docs/content/classic/configuration/options.md | 1 - docs/content/classic/setup/index.md | 13 ------------- 3 files changed, 15 deletions(-) diff --git a/classic/original_autogpt/autogpt/app/config.py b/classic/original_autogpt/autogpt/app/config.py index 2b49fb5af188..0fa21eb07d58 100644 --- a/classic/original_autogpt/autogpt/app/config.py +++ b/classic/original_autogpt/autogpt/app/config.py @@ -1,5 +1,4 @@ """Configuration class to store the state of bools for different scripts access.""" - from __future__ import annotations import logging diff --git a/docs/content/classic/configuration/options.md b/docs/content/classic/configuration/options.md index 7806bd59590d..9fb18c03e7ab 100644 --- a/docs/content/classic/configuration/options.md +++ b/docs/content/classic/configuration/options.md @@ -7,7 +7,6 @@ You can set configuration variables via the `.env` file. If you don't have a `.e - `AUTHORISE_COMMAND_KEY`: Key response accepted when authorising commands. Default: y - `ANTHROPIC_API_KEY`: Set this if you want to use Anthropic models with AutoGPT -- `AIML_API_KEY`: Set this if you want to use AI/ML models with AutoGPT - `AZURE_CONFIG_FILE`: Location of the Azure Config file relative to the AutoGPT root directory. Default: azure.yaml - `COMPONENT_CONFIG_FILE`: Path to the component configuration file (json) for an agent. Optional - `DISABLED_COMMANDS`: Commands to disable. Use comma separated names of commands.
See the list of commands from built-in components [here](../../forge/components/components.md). Default: None diff --git a/docs/content/classic/setup/index.md b/docs/content/classic/setup/index.md index 015fa7ebd063..95f2e6f7a380 100644 --- a/docs/content/classic/setup/index.md +++ b/docs/content/classic/setup/index.md @@ -175,19 +175,6 @@ If you don't know which to choose, you can safely go with OpenAI*. [anthropic/models]: https://docs.anthropic.com/en/docs/models-overview -### AI/ML API -1. Make sure you have credits in your account: [Billing -> View Usage](https://aimlapi.com/app/billing?utm_source=autogpt&utm_medium=github&utm_campaign=integration) -2. Get your AI/ML API key from [Key Management](https://aimlapi.com/app/keys?utm_source=autogpt&utm_medium=github&utm_campaign=integration) -3. Open `.env` -4. Find the line that says `AIML_API_KEY=` -5. Insert your AI/ML API Key directly after = without quotes or spaces: - ```ini - AIML_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - ``` -6. If you would like to see more of our [models](https://aimlapi.com/models?utm_source=autogpt&utm_medium=github&utm_campaign=integration) here or have any questions, we would welcome your [feedback](https://discord.gg/j5QggeZJgY)! ***See you later! With love, your AI/ML team***❤️ - - ### Groq !!! note From ed951b0bda47483bf3156a26bf21fd22eb4b0b25 Mon Sep 17 00:00:00 2001 From: Ivan Date: Fri, 10 Jan 2025 13:45:38 +0400 Subject: [PATCH 13/14] prettier fix --- .../frontend/src/app/profile/page.tsx | 30 +++++++++---------- .../integrations/credentials-input.tsx | 17 ++++++----- .../src/lib/autogpt-server-api/types.ts | 16 +++++----- 3 files changed, 32 insertions(+), 31 deletions(-) diff --git a/autogpt_platform/frontend/src/app/profile/page.tsx b/autogpt_platform/frontend/src/app/profile/page.tsx index b6f63f7bea74..2ca8be997f4a 100644 --- a/autogpt_platform/frontend/src/app/profile/page.tsx +++ b/autogpt_platform/frontend/src/app/profile/page.tsx @@ -38,11 +38,11 @@ export default function PrivatePage() { const [confirmationDialogState, setConfirmationDialogState] = useState< | { - open: true; - message: string; - onConfirm: () => void; - onReject: () => void; - } + open: true; + message: string; + onConfirm: () => void; + onReject: () => void; + } | { open: false } >({ open: false }); @@ -124,16 +124,16 @@ export default function PrivatePage() { const allCredentials = providers ?
Object.values(providers).flatMap((provider) => - [...provider.savedOAuthCredentials, ...provider.savedApiKeys] - .filter((cred) => !hiddenCredentials.includes(cred.id)) - .map((credentials) => ({ - ...credentials, - provider: provider.provider, - providerName: provider.providerName, - ProviderIcon: providerIcons[provider.provider], - TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], - })), - ) + [...provider.savedOAuthCredentials, ...provider.savedApiKeys] + .filter((cred) => !hiddenCredentials.includes(cred.id)) + .map((credentials) => ({ + ...credentials, + provider: provider.provider, + providerName: provider.providerName, + ProviderIcon: providerIcons[provider.provider], + TypeIcon: { oauth2: IconUser, api_key: IconKey }[credentials.type], + })), + ) : []; return ( diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx index b060d1216e26..7a2ec0d65921 100644 --- a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx +++ b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx @@ -86,14 +86,14 @@ export const providerIcons: Record< export type OAuthPopupResultMessage = { message_type: "oauth_popup_result" } & ( | { - success: true; - code: string; - state: string; - } + success: true; + code: string; + state: string; + } | { - success: false; - message: string; - } + success: false; + message: string; + } ); export const CredentialsInput: FC<{ @@ -188,7 +188,8 @@ export const CredentialsInput: FC<{ console.error("Error in OAuth callback:", error); setOAuthError( // type of error is unknown so we need to use String(error) - `Error in OAuth callback: ${error instanceof Error ? error.message : String(error) + `Error in OAuth callback: ${ + error instanceof Error ?
error.message : String(error) }`, ); } finally { diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts index 1025475c179e..dc3ebe886818 100644 --- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts +++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts @@ -152,16 +152,16 @@ export type BlockIONullSubSchema = BlockIOSubSchemaMeta & { type BlockIOCombinedTypeSubSchema = BlockIOSubSchemaMeta & ( | { - allOf: [BlockIOSimpleTypeSubSchema]; - } + allOf: [BlockIOSimpleTypeSubSchema]; + } | { - anyOf: BlockIOSimpleTypeSubSchema[]; - default?: string | number | boolean | null; - } + anyOf: BlockIOSimpleTypeSubSchema[]; + default?: string | number | boolean | null; + } | { - oneOf: BlockIOSimpleTypeSubSchema[]; - default?: string | number | boolean | null; - } + oneOf: BlockIOSimpleTypeSubSchema[]; + default?: string | number | boolean | null; + } ); /* Mirror of backend/data/graph.py:Node */ From 78a9677a128da10f9572307c22b81990054bfd33 Mon Sep 17 00:00:00 2001 From: Ivan Date: Mon, 13 Jan 2025 13:44:35 +0400 Subject: [PATCH 14/14] Lint fix --- autogpt_platform/backend/backend/blocks/llm.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index 0f4fce90fcbc..16c1c2ebac48 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -835,7 +835,9 @@ def _combine_summaries( chunk_overlap=input_data.chunk_overlap, ), credentials=credentials, - ).send(None)[1] # Get the first yielded value + ).send(None)[ + 1 + ] # Get the first yielded value class AIConversationBlock(Block): @@ -892,8 +894,7 @@ def __init__(self): "The 2020 World Series was played at Globe Life Field in Arlington, Texas.", ), test_mock={ - "llm_call": lambda *args, - **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." + "llm_call": lambda *args, **kwargs: "The 2020 World Series was played at Globe Life Field in Arlington, Texas." }, )
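Taken together, the series registers the AI/ML API as an OpenAI-compatible provider: the `aiml_api` branch added to `llm_call` in `llm.py` reuses the stock `openai` client with a custom `base_url`. The sketch below condenses that call path for reference; the helper name, the default model, and the environment-variable key lookup are illustrative simplifications (the platform actually resolves the key through `settings.secrets.aiml_api_key`), not code lifted from the patches.

```python
import os

import openai


def aiml_api_chat(
    prompt: str,
    model: str = "meta-llama/Llama-3.3-70B-Instruct-Turbo",
) -> str:
    """Minimal sketch of the aiml_api call path wired up in llm.py."""
    # The AI/ML API speaks the OpenAI wire protocol, so the stock client
    # works once base_url points at it (same endpoint as in the diffs).
    client = openai.OpenAI(
        base_url="https://api.aimlapi.com/v2",
        api_key=os.environ["AIML_API_KEY"],  # key name matches .env.example
    )
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content or ""
```

Routing through the OpenAI client is why the integration needs no new SDK dependency: only the model enum, cost table, credentials store, and frontend provider names had to learn the new `aiml_api` identifier.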