diff --git a/.flake8 b/.flake8 index aa8bb36842f6..5b964172464a 100644 --- a/.flake8 +++ b/.flake8 @@ -9,3 +9,4 @@ exclude = .venv/*, reports/*, dist/*, + data/*, diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml index 615e3ef6451a..f8d66dceaf75 100644 --- a/.github/workflows/close-stale-issues.yml +++ b/.github/workflows/close-stale-issues.yml @@ -13,7 +13,7 @@ jobs: steps: - uses: actions/stale@v8 with: - operations-per-run: 5000 + # operations-per-run: 5000 stale-issue-message: > This issue has automatically been marked as _stale_ because it has not had any activity in the last 50 days. You can _unstale_ it by commenting or @@ -28,7 +28,7 @@ jobs: days-before-stale: 50 days-before-close: 10 # Do not touch meta issues: - exempt-issue-labels: meta + exempt-issue-labels: meta,fridge,project management # Do not affect pull requests: days-before-pr-stale: -1 days-before-pr-close: -1 diff --git a/.github/workflows/hackathon.yml b/.github/workflows/hackathon.yml index 6d80f644ce74..9bd5d5f11c30 100644 --- a/.github/workflows/hackathon.yml +++ b/.github/workflows/hackathon.yml @@ -7,11 +7,29 @@ on: agents: description: "Agents to run (comma-separated)" required: false - default: "ZEROAGPT_03" # Default agents if none are specified + default: "autogpt" # Default agents if none are specified jobs: matrix-setup: runs-on: ubuntu-latest + # Service containers to run with `matrix-setup` + services: + # Label used to access the service container + postgres: + # Docker Hub image + image: postgres + # Provide the password for postgres + env: + POSTGRES_PASSWORD: postgres + # Set health checks to wait until postgres has started + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + # Maps tcp port 5432 on service container to the host + - 5432:5432 outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} env-name: ${{ steps.set-matrix.outputs.env-name }} @@ -20,7 +38,7 @@ jobs: run: | if [ "${{ github.event_name }}" == "schedule" ]; then echo "::set-output name=env-name::production" - echo "::set-output name=matrix::[ 'ZEROAGPT_03', 'evo-ninja', 'gpt-engineer']" + echo "::set-output name=matrix::[ 'irrelevant']" elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}" matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]" @@ -28,7 +46,7 @@ jobs: echo "::set-output name=matrix::$matrix_string" else echo "::set-output name=env-name::testing" - echo "::set-output name=matrix::[ 'mini-agi' ]" + echo "::set-output name=matrix::[ 'irrelevant' ]" fi tests: @@ -39,6 +57,23 @@ jobs: min-python-version: "3.10" name: "${{ matrix.agent-name }}" runs-on: ubuntu-latest + services: + # Label used to access the service container + postgres: + # Docker Hub image + image: postgres + # Provide the password for postgres + env: + POSTGRES_PASSWORD: postgres + # Set health checks to wait until postgres has started + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + # Maps tcp port 5432 on service container to the host + - 5432:5432 timeout-minutes: 50 strategy: fail-fast: false @@ -49,6 +84,10 @@ jobs: run: | echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}" + - name: Check Docker Container + id: check + run: docker ps + - name: Checkout repository uses: actions/checkout@v3 with: @@ -70,16 +109,28 @@ jobs: run: | curl -sSL 
https://install.python-poetry.org | python -
+
+      - name: Install Node.js
+        uses: actions/setup-node@v1
+        with:
+          node-version: v18.15
+
       - name: Run benchmark
         run: |
           link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
           branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
           git clone "$link" -b "$branch" "$AGENT_NAME"
           cd $AGENT_NAME
+          cp ./autogpts/$AGENT_NAME/.env.example ./autogpts/$AGENT_NAME/.env || echo "file not found"
           ./run agent start $AGENT_NAME
-          cd benchmark
+          cd ../benchmark
           poetry install
-          poetry run agbenchmark
+          poetry run agbenchmark --no_dep
         env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          AGENT_NAME: ${{ matrix.agent-name }}
+          SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
+          SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
+          WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
+          WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
+          GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
+          GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
+          AGENT_NAME: ${{ matrix.agent-name }}
\ No newline at end of file
diff --git a/QUICKSTART.md b/QUICKSTART.md
index 86add5511427..5eab892e348e 100644
--- a/QUICKSTART.md
+++ b/QUICKSTART.md
@@ -90,12 +90,12 @@ If you continue to experience issues, consider storing your project files within
 ![Create an Agent](docs/content/imgs/quickstart/007_create_agent.png)
- Upon creating your agent its time to offically enter the Arena!
+ Upon creating your agent it's time to officially enter the Arena!
 Do so by running `./run arena enter YOUR_AGENT_NAME`
 ![Enter the Arena](docs/content/imgs/quickstart/008_enter_arena.png)
- > Note: for adavanced yours, create a new branch and create a file called YOUR_AGENT_NAME.json in the arena directory. Then commit this and create a PR to merge into the main repo. Only single file entries will be permitted. The json file needs the following format.
+ > Note: for advanced users, create a new branch and create a file called YOUR_AGENT_NAME.json in the arena directory. Then commit this and create a PR to merge into the main repo. Only single file entries will be permitted. The json file needs the following format.
```json { "github_repo_url": "https://github.com/Swiftyos/YourAgentName", diff --git a/arena/AGENT_JARVIS.json b/arena/AGENT_JARVIS.json new file mode 100644 index 000000000000..ac284f6aa1c6 --- /dev/null +++ b/arena/AGENT_JARVIS.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/filipjakubowski/AutoGPT", + "timestamp": "2023-11-04T10:13:11.039444", + "commit_hash_to_benchmark": "78e92234d63a69b5471da0c0e62ce820a9109dd4", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Ahmad.json b/arena/Ahmad.json new file mode 100644 index 000000000000..2b5b86f12481 --- /dev/null +++ b/arena/Ahmad.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/JawadAbu/AutoGPT.git", + "timestamp": "2023-11-05T12:35:35.352028", + "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/Susan.json b/arena/Susan.json new file mode 100644 index 000000000000..4689ef84e2b2 --- /dev/null +++ b/arena/Susan.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/CodeZeno/Susan", + "timestamp": "2023-11-03T11:29:28.704822", + "commit_hash_to_benchmark": "82fecfae1b4fb5d64050eefa77d8f028292aa8f3", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/autogpt.json b/arena/autogpt.json new file mode 100644 index 000000000000..931aa3aa5cf8 --- /dev/null +++ b/arena/autogpt.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/Significant-Gravitas/AutoGPT", + "timestamp": "2023-11-15T07:22:09.723393", + "commit_hash_to_benchmark": "fa357dd13928baa4d1e30054bc75edc5d68b08f1", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/autogptagent.json b/arena/autogptagent.json new file mode 100644 index 000000000000..589001597df6 --- /dev/null +++ b/arena/autogptagent.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/YasienDwieb/AutoGPT", + "timestamp": "2023-11-04T21:13:17.223261", + "commit_hash_to_benchmark": "0b55de62dc61a33ccf944d80b6d55c730286e07d", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/bingoTesting.json b/arena/bingoTesting.json new file mode 100644 index 000000000000..a8fd1e210e0c --- /dev/null +++ b/arena/bingoTesting.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/bingotyty/AutoGPT", + "timestamp": "2023-11-06T04:16:38.612948", + "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/qinghu3.json b/arena/qinghu3.json new file mode 100644 index 000000000000..06b4a4d943de --- /dev/null +++ b/arena/qinghu3.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/QingHu1227/AutoGPT.git", + "timestamp": "2023-11-06T04:11:34.227212", + "commit_hash_to_benchmark": "a1d60878141116641ea864ef6de7ca6142e9534c", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/arena/songyalei.json b/arena/songyalei.json new file mode 100644 index 000000000000..2c3b7dcc3032 --- /dev/null +++ b/arena/songyalei.json @@ -0,0 +1,6 @@ +{ + "github_repo_url": "https://github.com/songyalei/AutoGPT", + "timestamp": "2023-11-16T07:11:39.746384", + "commit_hash_to_benchmark": "fa357dd13928baa4d1e30054bc75edc5d68b08f1", + "branch_to_benchmark": "master" +} \ No newline at end of file diff --git a/autogpts/autogpt/autogpt/agent_factory/profile_generator.py b/autogpts/autogpt/autogpt/agent_factory/profile_generator.py 
index a4617c810419..34362e20c4a7 100644 --- a/autogpts/autogpt/autogpt/agent_factory/profile_generator.py +++ b/autogpts/autogpt/autogpt/agent_factory/profile_generator.py @@ -36,9 +36,10 @@ class AgentProfileGeneratorConfiguration(SystemConfiguration): "\n" "Example Input:\n" '"""Help me with marketing my business"""\n\n' - "Example Function Call:\n" + "Example Call:\n" "```\n" - "{" + "[" # tool_calls + '{"type": "function", "function": {' '"name": "create_agent",' ' "arguments": {' '"name": "CMOGPT",' @@ -65,7 +66,9 @@ class AgentProfileGeneratorConfiguration(SystemConfiguration): "]" # constraints "}" # directives "}" # arguments - "}\n" + "}" # function + "}" # tool call + "]\n" # tool_calls "```" ) ) @@ -172,7 +175,9 @@ def parse_response_content( """ try: - arguments = json_loads(response_content["function_call"]["arguments"]) + arguments = json_loads( + response_content["tool_calls"][0]["function"]["arguments"] + ) ai_profile = AIProfile( ai_name=arguments.get("name"), ai_role=arguments.get("description"), diff --git a/autogpts/autogpt/autogpt/agents/agent.py b/autogpts/autogpt/autogpt/agents/agent.py index 633c3e62b926..13275a4e85a5 100644 --- a/autogpts/autogpt/autogpt/agents/agent.py +++ b/autogpts/autogpt/autogpt/agents/agent.py @@ -187,13 +187,14 @@ def parse_and_process_response( NEXT_ACTION_FILE_NAME, ) - self.event_history.register_action( - Action( - name=command_name, - args=arguments, - reasoning=assistant_reply_dict["thoughts"]["reasoning"], + if command_name: + self.event_history.register_action( + Action( + name=command_name, + args=arguments, + reasoning=assistant_reply_dict["thoughts"]["reasoning"], + ) ) - ) return command_name, arguments, assistant_reply_dict diff --git a/autogpts/autogpt/autogpt/agents/base.py b/autogpts/autogpt/autogpt/agents/base.py index db3bec6ee13a..1dd9ac493354 100644 --- a/autogpts/autogpt/autogpt/agents/base.py +++ b/autogpts/autogpt/autogpt/agents/base.py @@ -61,7 +61,7 @@ class BaseAgentConfiguration(SystemConfiguration): default_cycle_instruction: str = DEFAULT_TRIGGERING_PROMPT """The default instruction passed to the AI for a thinking cycle.""" - big_brain: bool = UserConfigurable(default=False) + big_brain: bool = UserConfigurable(default=True) """ Whether this agent uses the configured smart LLM (default) to think, as opposed to the configured fast LLM. Enabling this disables hybrid mode. diff --git a/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py index d927d65ce95e..5873c604c550 100644 --- a/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py +++ b/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py @@ -42,7 +42,8 @@ class OneShotAgentPromptConfiguration(SystemConfiguration): "{resources}\n" "\n" "## Commands\n" - "You have access to the following commands:\n" + "These are the ONLY commands you can use." 
+ " Any action you perform must be possible through one of these commands:\n" "{commands}\n" "\n" "## Best practices\n" @@ -62,6 +63,13 @@ class OneShotAgentPromptConfiguration(SystemConfiguration): type=JSONSchema.Type.OBJECT, required=True, properties={ + "observations": JSONSchema( + description=( + "Relevant observations from your last action (if any)" + ), + type=JSONSchema.Type.STRING, + required=False, + ), "text": JSONSchema( description="Thoughts", type=JSONSchema.Type.STRING, @@ -71,13 +79,13 @@ class OneShotAgentPromptConfiguration(SystemConfiguration): type=JSONSchema.Type.STRING, required=True, ), - "plan": JSONSchema( - description="Short markdown-style bullet list that conveys the long-term plan", + "self_criticism": JSONSchema( + description="Constructive self-criticism", type=JSONSchema.Type.STRING, required=True, ), - "criticism": JSONSchema( - description="Constructive self-criticism", + "plan": JSONSchema( + description="Short markdown-style bullet list that conveys the long-term plan", type=JSONSchema.Type.STRING, required=True, ), @@ -308,7 +316,7 @@ def response_format_instruction(self, use_functions_api: bool) -> str: ) return ( - f"Respond strictly with a JSON object{' containing your thoughts, and a function_call specifying the next command to use' if use_functions_api else ''}. " + f"Respond strictly with a JSON object{' containing your thoughts, and a tool_call specifying the next command to use' if use_functions_api else ''}. " "The JSON object should be compatible with the TypeScript type `Response` from the following:\n" f"{response_format}" ) @@ -423,11 +431,13 @@ def extract_command( Exception: If any other error occurs """ if use_openai_functions_api: - if "function_call" not in assistant_reply: - raise InvalidAgentResponseError("No 'function_call' in assistant reply") + if not assistant_reply.get("tool_calls"): + raise InvalidAgentResponseError("No 'tool_calls' in assistant reply") assistant_reply_json["command"] = { - "name": assistant_reply["function_call"]["name"], - "args": json.loads(assistant_reply["function_call"]["arguments"]), + "name": assistant_reply["tool_calls"][0]["function"]["name"], + "args": json.loads( + assistant_reply["tool_calls"][0]["function"]["arguments"] + ), } try: if not isinstance(assistant_reply_json, dict): diff --git a/autogpts/autogpt/autogpt/app/agent_protocol_server.py b/autogpts/autogpt/autogpt/app/agent_protocol_server.py index 84573edc405b..9d5faa91b4e6 100644 --- a/autogpts/autogpt/autogpt/app/agent_protocol_server.py +++ b/autogpts/autogpt/autogpt/app/agent_protocol_server.py @@ -64,12 +64,7 @@ async def start(self, port: int = 8000, router: APIRouter = base_router): # Add CORS middleware origins = [ - "http://localhost:5000", - "http://127.0.0.1:5000", - "http://localhost:8000", - "http://127.0.0.1:8000", - "http://localhost:8080", - "http://127.0.0.1:8080", + "*", # Add any other origins you want to whitelist ] @@ -226,6 +221,8 @@ async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Ste if execute_command == ask_user.__name__: # HACK execute_result = ActionSuccessResult(outputs=user_input) agent.event_history.register_result(execute_result) + elif not execute_command: + execute_result = None elif execute_approved: step = await self.db.update_step( task_id=task_id, @@ -262,7 +259,8 @@ async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Ste output = ( ( f"Command `{execute_command}({fmt_kwargs(execute_command_args)})` returned:" - f" {execute_result}\n\n" + + ("\n\n" 
if "\n" in str(execute_result) else " ") + + f"{execute_result}\n\n" ) if execute_command_args and execute_command != "ask_user" else "" diff --git a/autogpts/autogpt/autogpt/app/main.py b/autogpts/autogpt/autogpt/app/main.py index c974ae15ae94..683793a70b0d 100644 --- a/autogpts/autogpt/autogpt/app/main.py +++ b/autogpts/autogpt/autogpt/app/main.py @@ -556,14 +556,17 @@ def handle_stop_signal() -> None: handle_stop_signal() - result = await agent.execute(command_name, command_args, user_input) + if command_name: + result = await agent.execute(command_name, command_args, user_input) - if result.status == "success": - logger.info(result, extra={"title": "SYSTEM:", "title_color": Fore.YELLOW}) - elif result.status == "error": - logger.warn( - f"Command {command_name} returned an error: {result.error or result.reason}" - ) + if result.status == "success": + logger.info( + result, extra={"title": "SYSTEM:", "title_color": Fore.YELLOW} + ) + elif result.status == "error": + logger.warn( + f"Command {command_name} returned an error: {result.error or result.reason}" + ) def update_user( @@ -689,7 +692,7 @@ def print_assistant_thoughts( ) assistant_thoughts_plan = remove_ansi_escape(assistant_thoughts.get("plan", "")) assistant_thoughts_criticism = remove_ansi_escape( - assistant_thoughts.get("criticism", "") + assistant_thoughts.get("self_criticism", "") ) assistant_thoughts_speak = remove_ansi_escape( assistant_thoughts.get("speak", "") diff --git a/autogpts/autogpt/autogpt/commands/web_search.py b/autogpts/autogpt/autogpt/commands/web_search.py index 8c9a9334ce57..c4411d9361d7 100644 --- a/autogpts/autogpt/autogpt/commands/web_search.py +++ b/autogpts/autogpt/autogpt/commands/web_search.py @@ -61,12 +61,23 @@ def web_search(query: str, agent: Agent, num_results: int = 8) -> str: { "title": r["title"], "url": r["href"], - **({"description": r["body"]} if r.get("body") else {}), + **({"exerpt": r["body"]} if r.get("body") else {}), } for r in search_results ] - results = json.dumps(search_results, ensure_ascii=False, indent=4) + results = ( + "## Search results\n" + # "Read these results carefully." + # " Extract the information you need for your task from the list of results" + # " if possible. Otherwise, choose a webpage from the list to read entirely." + # "\n\n" + ) + "\n\n".join( + f"### \"{r['title']}\"\n" + f"**URL:** {r['url']} \n" + "**Excerpt:** " + (f'"{exerpt}"' if (exerpt := r.get("exerpt")) else "N/A") + for r in search_results + ) return safe_google_results(results) diff --git a/autogpts/autogpt/autogpt/config/config.py b/autogpts/autogpt/autogpt/config/config.py index 5436a670bb82..871479e88acb 100644 --- a/autogpts/autogpt/autogpt/config/config.py +++ b/autogpts/autogpt/autogpt/config/config.py @@ -55,7 +55,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True): prompt_settings_file: Path = project_root / PROMPT_SETTINGS_FILE # Model configuration fast_llm: str = "gpt-3.5-turbo-16k" - smart_llm: str = "gpt-4-0314" + smart_llm: str = "gpt-4" temperature: float = 0 openai_functions: bool = False embedding_model: str = "text-embedding-ada-002" diff --git a/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md b/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md index e66633f25bcb..af6aac7b78d7 100644 --- a/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md +++ b/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md @@ -40,7 +40,7 @@ for the breaking version change. 
diff --git a/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md b/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md
index e66633f25bcb..af6aac7b78d7 100644
--- a/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md
+++ b/autogpts/autogpt/autogpt/core/ARCHITECTURE_NOTES.md
@@ -40,7 +40,7 @@ We justified this by saying:
 ## Secondary goals
-- Use existing tools to ditch any unneccesary cruft in the codebase (document loading,
+- Use existing tools to ditch any unnecessary cruft in the codebase (document loading,
   json parsing, anything easier to replace than to port).
 - Bring in the [core agent loop updates](https://whimsical.com/agent-workflow-v2-NmnTQ8R7sVo7M3S43XgXmZ)
   being developed simultaneously by @Pwuts
@@ -195,7 +195,7 @@ Plugins are a kind of garbage term. They refer to a number of things.
 The current plugin system is _hook-based_. This means plugins don't correspond to
 kinds of objects in the system, but rather to times in the system at which we defer
 execution to them. The main advantage of this setup is that user code can hijack
-pretty much any behavior of the agent by injecting code that supercedes the normal
+pretty much any behavior of the agent by injecting code that supersedes the normal
 agent execution. The disadvantages to this approach are numerous:
 - We have absolutely no mechanisms to enforce any security measures because the threat
@@ -235,7 +235,7 @@ There are three kinds of things (roughly) that are written as classes in the re-
   is *data* and we use **[Pydantic](https://docs.pydantic.dev/latest/)** to manage it as
   pydantic is basically industry standard for this stuff. It provides runtime validation
   for all the configuration and allows us to easily serialize configuration to both basic
-  python types (dicts, lists, and primatives) as well as serialize to json, which is
+  python types (dicts, lists, and primitives) as well as serialize to json, which is
   important for us being able to put representations of agents
   [on the wire](https://en.wikipedia.org/wiki/Wire_protocol) for web applications and
   agent-to-agent communication. *These are essentially
diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py
index b5a45a8bbeb8..6f00276cf2ac 100644
--- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py
+++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/initial_plan.py
@@ -169,7 +169,9 @@ def parse_response_content(
         The parsed response.
""" try: - parsed_response = json_loads(response_content["function_call"]["arguments"]) + parsed_response = json_loads( + response_content["tool_calls"][0]["function"]["arguments"] + ) parsed_response["task_list"] = [ Task.parse_obj(task) for task in parsed_response["task_list"] ] diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py index 0cb2b557fc13..360821b50263 100644 --- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py +++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/name_and_goals.py @@ -133,7 +133,9 @@ def parse_response_content( """ try: - parsed_response = json_loads(response_content["function_call"]["arguments"]) + parsed_response = json_loads( + response_content["tool_calls"][0]["function"]["arguments"] + ) except KeyError: logger.debug(f"Failed to parse this response content: {response_content}") raise diff --git a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py index 725128c5ce2e..6efed7beed99 100644 --- a/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py +++ b/autogpts/autogpt/autogpt/core/planning/prompt_strategies/next_ability.py @@ -170,9 +170,9 @@ def parse_response_content( """ try: - function_name = response_content["function_call"]["name"] + function_name = response_content["tool_calls"][0]["function"]["name"] function_arguments = json_loads( - response_content["function_call"]["arguments"] + response_content["tool_calls"][0]["function"]["arguments"] ) parsed_response = { "motivation": function_arguments.pop("motivation"), diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py index 4167b7c19ec5..d242942ae0a1 100644 --- a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py +++ b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py @@ -16,7 +16,7 @@ ) from autogpt.core.resource.model_providers.schema import ( AssistantChatMessageDict, - AssistantFunctionCallDict, + AssistantToolCallDict, ChatMessage, ChatModelInfo, ChatModelProvider, @@ -49,6 +49,7 @@ class OpenAIModelName(str, enum.Enum): GPT3_v1 = "gpt-3.5-turbo-0301" GPT3_v2 = "gpt-3.5-turbo-0613" GPT3_v2_16k = "gpt-3.5-turbo-16k-0613" + GPT3_v3 = "gpt-3.5-turbo-1106" GPT3_ROLLING = "gpt-3.5-turbo" GPT3_ROLLING_16k = "gpt-3.5-turbo-16k" GPT3 = GPT3_ROLLING @@ -58,8 +59,10 @@ class OpenAIModelName(str, enum.Enum): GPT4_v1_32k = "gpt-4-32k-0314" GPT4_v2 = "gpt-4-0613" GPT4_v2_32k = "gpt-4-32k-0613" + GPT4_v3 = "gpt-4-1106-preview" GPT4_ROLLING = "gpt-4" GPT4_ROLLING_32k = "gpt-4-32k" + GPT4_VISION = "gpt-4-vision-preview" GPT4 = GPT4_ROLLING GPT4_32k = GPT4_ROLLING_32k @@ -97,6 +100,15 @@ class OpenAIModelName(str, enum.Enum): max_tokens=16384, has_function_call_api=True, ), + ChatModelInfo( + name=OpenAIModelName.GPT3_v3, + service=ModelProviderService.CHAT, + provider_name=ModelProviderName.OPENAI, + prompt_token_cost=0.001 / 1000, + completion_token_cost=0.002 / 1000, + max_tokens=16384, + has_function_call_api=True, + ), ChatModelInfo( name=OpenAIModelName.GPT4, service=ModelProviderService.CHAT, @@ -115,6 +127,15 @@ class OpenAIModelName(str, enum.Enum): max_tokens=32768, has_function_call_api=True, ), + ChatModelInfo( + name=OpenAIModelName.GPT4_v3, + service=ModelProviderService.CHAT, + provider_name=ModelProviderName.OPENAI, + 
prompt_token_cost=0.01 / 1000, + completion_token_cost=0.03 / 1000, + max_tokens=128000, + has_function_call_api=True, + ), ] } # Copy entries for models with equivalent specs @@ -271,7 +292,7 @@ async def create_chat_completion( """Create a completion using the OpenAI API.""" completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs) - functions_compat_mode = functions and "functions" not in completion_kwargs + tool_calls_compat_mode = functions and "tools" not in completion_kwargs if "messages" in completion_kwargs: model_prompt += completion_kwargs["messages"] del completion_kwargs["messages"] @@ -287,8 +308,8 @@ async def create_chat_completion( } response_message = response.choices[0].message.to_dict_recursive() - if functions_compat_mode: - response_message["function_call"] = _functions_compat_extract_call( + if tool_calls_compat_mode: + response_message["tool_calls"] = _tool_calls_compat_extract_calls( response_message["content"] ) response = ChatModelResponse( @@ -346,10 +367,15 @@ def _get_completion_kwargs( if functions: if OPEN_AI_CHAT_MODELS[model_name].has_function_call_api: - completion_kwargs["functions"] = [f.schema for f in functions] + completion_kwargs["tools"] = [ + {"type": "function", "function": f.schema} for f in functions + ] if len(functions) == 1: # force the model to call the only specified function - completion_kwargs["function_call"] = {"name": functions[0].name} + completion_kwargs["tool_choice"] = { + "type": "function", + "function": {"name": functions[0].name}, + } else: # Provide compatibility with older models _functions_compat_fix_kwargs(functions, completion_kwargs) @@ -411,7 +437,7 @@ async def _create_chat_completion( The completion. """ raw_messages = [ - message.dict(include={"role", "content", "function_call", "name"}) + message.dict(include={"role", "content", "tool_calls", "name"}) for message in messages ] return await openai.ChatCompletion.acreate( @@ -573,14 +599,27 @@ def _functions_compat_fix_kwargs( ), }, ) + tool_calls_schema = JSONSchema( + type=JSONSchema.Type.ARRAY, + items=JSONSchema( + type=JSONSchema.Type.OBJECT, + properties={ + "type": JSONSchema( + type=JSONSchema.Type.STRING, + enum=["function"], + ), + "function": function_call_schema, + }, + ), + ) completion_kwargs["messages"] = [ ChatMessage.system( - "# function_call instructions\n\n" - "Specify a '```function_call' block in your response," - " enclosing a function call in the form of a valid JSON object" - " that adheres to the following schema:\n\n" - f"{function_call_schema.to_dict()}\n\n" - "Put the function_call block at the end of your response" + "# tool usage instructions\n\n" + "Specify a '```tool_calls' block in your response," + " with a valid JSON object that adheres to the following schema:\n\n" + f"{tool_calls_schema.to_dict()}\n\n" + "Specify any tools that you need to use through this JSON object.\n\n" + "Put the tool_calls block at the end of your response" " and include its fences if it is not the only content.\n\n" "## functions\n\n" "For the function call itself, use one of the following" @@ -589,19 +628,21 @@ def _functions_compat_fix_kwargs( ] -def _functions_compat_extract_call(response: str) -> AssistantFunctionCallDict: +def _tool_calls_compat_extract_calls(response: str) -> list[AssistantToolCallDict]: import json import re - logging.debug(f"Trying to extract function call from response:\n{response}") + logging.debug(f"Trying to extract tool calls from response:\n{response}") - if response[0] == "{": - function_call = 
json.loads(response) + if response[0] == "[": + tool_calls: list[AssistantToolCallDict] = json.loads(response) else: - block = re.search(r"```(?:function_call)?\n(.*)\n```\s*$", response, re.DOTALL) + block = re.search(r"```(?:tool_calls)?\n(.*)\n```\s*$", response, re.DOTALL) if not block: - raise ValueError("Could not find function call block in response") - function_call = json.loads(block.group(1)) + raise ValueError("Could not find tool calls block in response") + tool_calls: list[AssistantToolCallDict] = json.loads(block.group(1)) + + for t in tool_calls: + t["function"]["arguments"] = str(t["function"]["arguments"]) # HACK - function_call["arguments"] = str(function_call["arguments"]) # HACK - return function_call + return tool_calls diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py b/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py index 14e5618c8bf7..ccf3255b4d6d 100644 --- a/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py +++ b/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py @@ -77,16 +77,28 @@ class AssistantFunctionCallDict(TypedDict): arguments: str +class AssistantToolCall(BaseModel): + # id: str + type: Literal["function"] + function: AssistantFunctionCall + + +class AssistantToolCallDict(TypedDict): + # id: str + type: Literal["function"] + function: AssistantFunctionCallDict + + class AssistantChatMessage(ChatMessage): role: Literal["assistant"] content: Optional[str] - function_call: Optional[AssistantFunctionCall] + tool_calls: Optional[list[AssistantToolCall]] class AssistantChatMessageDict(TypedDict, total=False): role: str content: str - function_call: AssistantFunctionCallDict + tool_calls: list[AssistantToolCallDict] class CompletionModelFunction(BaseModel): diff --git a/autogpts/autogpt/autogpt/llm/providers/openai.py b/autogpts/autogpt/autogpt/llm/providers/openai.py index 28459748f544..aac24ffc8837 100644 --- a/autogpts/autogpt/autogpt/llm/providers/openai.py +++ b/autogpts/autogpt/autogpt/llm/providers/openai.py @@ -1,20 +1,9 @@ from __future__ import annotations -import functools import logging -import time from typing import Callable, Iterable, TypeVar -from unittest.mock import patch - -import openai -import openai.api_resources.abstract.engine_api_resource as engine_api_resource -from colorama import Fore, Style -from openai.error import APIError, RateLimitError, ServiceUnavailableError, Timeout -from openai.openai_object import OpenAIObject from autogpt.core.resource.model_providers import CompletionModelFunction -from autogpt.core.utils.json_schema import JSONSchema -from autogpt.logs.helpers import request_user_double_check from autogpt.models.command import Command logger = logging.getLogger(__name__) @@ -23,141 +12,6 @@ T = TypeVar("T", bound=Callable) -def meter_api(func: T) -> T: - """Adds ApiManager metering to functions which make OpenAI API calls""" - from autogpt.llm.api_manager import ApiManager - - api_manager = ApiManager() - - openai_obj_processor = openai.util.convert_to_openai_object - - def update_usage_with_response(response: OpenAIObject): - try: - usage = response.usage - logger.debug(f"Reported usage from call to model {response.model}: {usage}") - api_manager.update_cost( - response.usage.prompt_tokens, - response.usage.completion_tokens if "completion_tokens" in usage else 0, - response.model, - ) - except Exception as err: - logger.warn(f"Failed to update API costs: {err.__class__.__name__}: {err}") - - def metering_wrapper(*args, **kwargs): - openai_obj = 
openai_obj_processor(*args, **kwargs) - if isinstance(openai_obj, OpenAIObject) and "usage" in openai_obj: - update_usage_with_response(openai_obj) - return openai_obj - - @functools.wraps(func) - def metered_func(*args, **kwargs): - with patch.object( - engine_api_resource.util, - "convert_to_openai_object", - side_effect=metering_wrapper, - ): - return func(*args, **kwargs) - - return metered_func - - -def retry_api( - max_retries: int = 10, - backoff_base: float = 2.0, - warn_user: bool = True, -): - """Retry an OpenAI API call. - - Args: - num_retries int: Number of retries. Defaults to 10. - backoff_base float: Base for exponential backoff. Defaults to 2. - warn_user bool: Whether to warn the user. Defaults to True. - """ - error_messages = { - ServiceUnavailableError: "The OpenAI API engine is currently overloaded", - RateLimitError: "Reached rate limit", - } - api_key_error_msg = ( - f"Please double check that you have setup a " - f"{Style.BRIGHT}PAID{Style.NORMAL} OpenAI API Account. You can " - f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}" - ) - backoff_msg = "Waiting {backoff} seconds..." - - def _wrapper(func: T) -> T: - @functools.wraps(func) - def _wrapped(*args, **kwargs): - user_warned = not warn_user - max_attempts = max_retries + 1 # +1 for the first attempt - for attempt in range(1, max_attempts + 1): - try: - return func(*args, **kwargs) - - except (RateLimitError, ServiceUnavailableError) as e: - if attempt >= max_attempts or ( - # User's API quota exceeded - isinstance(e, RateLimitError) - and (err := getattr(e, "error", {})) - and err.get("code") == "insufficient_quota" - ): - raise - - error_msg = error_messages[type(e)] - logger.warn(error_msg) - if not user_warned: - request_user_double_check(api_key_error_msg) - logger.debug(f"Status: {e.http_status}") - logger.debug(f"Response body: {e.json_body}") - logger.debug(f"Response headers: {e.headers}") - user_warned = True - - except (APIError, Timeout) as e: - if (e.http_status not in [429, 502]) or (attempt == max_attempts): - raise - - backoff = backoff_base ** (attempt + 2) - logger.warn(backoff_msg.format(backoff=backoff)) - time.sleep(backoff) - - return _wrapped - - return _wrapper - - -def format_openai_function_for_prompt(func: CompletionModelFunction) -> str: - """Returns the function formatted similarly to the way OpenAI does it internally: - https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18 - - Example: - ```ts - // Get the current weather in a given location - type get_current_weather = (_: { - // The city and state, e.g. 
San Francisco, CA - location: string, - unit?: "celsius" | "fahrenheit", - }) => any; - ``` - """ - - def param_signature(name: str, spec: JSONSchema) -> str: - # TODO: enum type support - type_dec = ( - spec.type if not spec.enum else " | ".join(repr(e) for e in spec.enum) - ) - return ( - f"// {spec.description}\n" if spec.description else "" - ) + f"{name}{'' if spec.required else '?'}: {type_dec}," - - return "\n".join( - [ - f"// {func.description}", - f"type {func.name} = (_ :{{", - *[param_signature(name, p) for name, p in func.parameters.items()], - "}) => any;", - ] - ) - - def get_openai_command_specs( commands: Iterable[Command], ) -> list[CompletionModelFunction]: @@ -172,50 +26,3 @@ def get_openai_command_specs( ) for command in commands ] - - -def count_openai_functions_tokens( - functions: list[CompletionModelFunction], for_model: str -) -> int: - """Returns the number of tokens taken up by a set of function definitions - - Reference: https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18 - """ - from autogpt.llm.utils import ( - count_string_tokens, # FIXME: maybe move to OpenAIProvider? - ) - - return count_string_tokens( - f"# Tools\n\n## functions\n\n{format_function_specs_as_typescript_ns(functions)}", - for_model, - ) - - -def format_function_specs_as_typescript_ns( - functions: list[CompletionModelFunction], -) -> str: - """Returns a function signature block in the format used by OpenAI internally: - https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18 - - For use with `count_string_tokens` to determine token usage of provided functions. - - Example: - ```ts - namespace functions { - - // Get the current weather in a given location - type get_current_weather = (_: { - // The city and state, e.g. San Francisco, CA - location: string, - unit?: "celsius" | "fahrenheit", - }) => any; - - } // namespace functions - ``` - """ - - return ( - "namespace functions {\n\n" - + "\n\n".join(format_openai_function_for_prompt(f) for f in functions) - + "\n\n} // namespace functions" - ) diff --git a/autogpts/autogpt/pyproject.toml b/autogpts/autogpt/pyproject.toml index 6d873d302387..1877c02797b8 100644 --- a/autogpts/autogpt/pyproject.toml +++ b/autogpts/autogpt/pyproject.toml @@ -108,7 +108,7 @@ line-length = 88 target-version = ['py310'] include = '\.pyi?$' packages = ["autogpt"] -extend-exclude = '.+/(dist|.venv|venv|build)/.+' +extend-exclude = '.+/(dist|.venv|venv|build|data)/.+' [tool.isort] diff --git a/autogpts/forge/forge/sdk/abilities/registry.py b/autogpts/forge/forge/sdk/abilities/registry.py index dddb2d8a66bf..c4b853ec2c4c 100644 --- a/autogpts/forge/forge/sdk/abilities/registry.py +++ b/autogpts/forge/forge/sdk/abilities/registry.py @@ -84,7 +84,7 @@ def decorator(func): func_param_names = set(func_params.keys()) if param_names != func_param_names: raise ValueError( - f"Mismatch in parameter names. Ability Annotation includes {param_names}, but function acatually takes {func_param_names} in function {func.__name__} signature" + f"Mismatch in parameter names. 
Ability Annotation includes {param_names}, but function actually takes {func_param_names} in function {func.__name__} signature" ) func.ability = Ability( name=name, diff --git a/autogpts/forge/tutorials/001_getting_started.md b/autogpts/forge/tutorials/001_getting_started.md index 0b015cc3066a..4fb899951270 100644 --- a/autogpts/forge/tutorials/001_getting_started.md +++ b/autogpts/forge/tutorials/001_getting_started.md @@ -58,7 +58,7 @@ Create your agent template using the command: ![Create an Agent](../../../docs/content/imgs/quickstart/007_create_agent.png) ### Entering the Arena -The Arena is a collection of all AutoGPT agents ranked by performance on our benchmark. Entering the Arena is a required step for participating in AutoGPT hackathons. It's early days, so show us what you've got! +The Arena is a collection of all AutoGPT agents. It serves as a competitive environment where all agents are assessed to find the best generalist agent. Entering the Arena is a required step for participating in AutoGPT hackathons. It allows your agent to be part of a diverse and dynamic ecosystem, where it is periodically assessed by the benchmark to be scored on the official leaderboard. Officially enter the Arena by executing the command: diff --git a/benchmark/agbenchmark/reports/processing/report_types.py b/benchmark/agbenchmark/reports/processing/report_types.py index e2fb1bc62354..d2fc8dea302f 100644 --- a/benchmark/agbenchmark/reports/processing/report_types.py +++ b/benchmark/agbenchmark/reports/processing/report_types.py @@ -51,6 +51,8 @@ class Test(BaseModelBenchmark): category: List[str] task: str reached_cutoff: bool + metadata: Any + class ReportBase(BaseModelBenchmark): @@ -68,6 +70,7 @@ class Report(ReportBase): tests: Dict[str, Test] + class ReportV2(Test, ReportBase): test_name: str run_id: str | None diff --git a/benchmark/agbenchmark/utils/data_types.py b/benchmark/agbenchmark/utils/data_types.py index 74b509329e13..955b1d6a86d4 100644 --- a/benchmark/agbenchmark/utils/data_types.py +++ b/benchmark/agbenchmark/utils/data_types.py @@ -174,6 +174,9 @@ class Category(str, Enum): GENERALIST = "general" CODING = "coding" SCRAPE_SYNTHESIZE = "scrape_synthesize" + GAIA_1 = "GAIA_1" + GAIA_2 = "GAIA_2" + GAIA_3 = "GAIA_3" class ChallengeData(BaseModel): diff --git a/cli.py b/cli.py index 0887e4c4f275..6c45695a8a22 100644 --- a/cli.py +++ b/cli.py @@ -219,7 +219,7 @@ def create(agent_name): import re import shutil - if not re.match("\W*$", agent_name): + if not re.match(r"\w*$", agent_name): click.echo( click.style( f"😞 Agent name '{agent_name}' is not valid. 
It should not contain spaces or special characters other than -_", diff --git a/docs/content/setup.md b/docs/content/AutoGPT/Setups/Docker-setup.md similarity index 60% rename from docs/content/setup.md rename to docs/content/AutoGPT/Setups/Docker-setup.md index 19fb7b9b50ac..af1c49d1f61a 100644 --- a/docs/content/setup.md +++ b/docs/content/AutoGPT/Setups/Docker-setup.md @@ -1,37 +1,12 @@ -# Setting up AutoGPT - -## 📋 Requirements - -Choose an environment to run AutoGPT in (pick one): - - - [Docker](https://docs.docker.com/get-docker/) (*recommended*) - - Python 3.10 or later (instructions: [for Windows](https://www.tutorialspoint.com/how-to-install-python-in-windows)) - - [VSCode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) - - -## 🗝️ Getting an API key - -Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](https://platform.openai.com/account/api-keys). - -!!! attention - To use the OpenAI API with AutoGPT, we strongly recommend **setting up billing** - (AKA paid account). Free accounts are [limited][openai/api limits] to 3 API calls per - minute, which can cause the application to crash. - - You can set up a paid account at [Manage account > Billing > Overview](https://platform.openai.com/account/billing/overview). - -[openai/api limits]: https://platform.openai.com/docs/guides/rate-limits/overview#:~:text=Free%20trial%20users,RPM%0A40%2C000%20TPM +### Set up with Docker -!!! important - It's highly recommended that you keep track of your API costs on [the Usage page](https://platform.openai.com/account/usage). - You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits). +!!! important "Docker Setup Issue" + We are addressing a known issue with the Docker setup related to Poetry. -![For OpenAI API key to work, set up paid account at OpenAI API > Billing](./imgs/openai-api-key-billing-paid-account.png) + [**We have an open PR if you'd like to take a look**](https://github.com/python-poetry/poetry/issues/8548) + Please keep this in mind. We apologize for any inconvenience, and thank you for your patience. -## Setting up AutoGPT - -### Set up with Docker 1. Make sure you have Docker installed, see [requirements](#requirements) 2. Create a project directory for AutoGPT @@ -74,8 +49,6 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt docker pull significantgravitas/auto-gpt ``` -6. Continue to [Run with Docker](#run-with-docker) - !!! note "Docker only supports headless browsing" AutoGPT uses a browser in headless mode by default: `HEADLESS_BROWSER=True`. Please do not change this setting in combination with Docker, or AutoGPT will crash. @@ -83,37 +56,6 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt [Docker Hub]: https://hub.docker.com/r/significantgravitas/auto-gpt [repository]: https://github.com/Significant-Gravitas/AutoGPT - -### Set up with Git - -!!! important - Make sure you have [Git](https://git-scm.com/downloads) installed for your OS. - -!!! info "Executing commands" - To execute the given commands, open a CMD, Bash, or Powershell window. - On Windows: press ++win+x++ and pick *Terminal*, or ++win+r++ and enter `cmd` - -1. Clone the repository - - ```shell - git clone https://github.com/Significant-Gravitas/AutoGPT.git - ``` - -2. 
Navigate to the directory where you downloaded the repository - - ```shell - cd AutoGPT/autogpts/autogpt - ``` - -### Set up without Git/Docker - -!!! warning - We recommend to use Git or Docker, to make updating easier. Also note that some features such as Python execution will only work inside docker for security reasons. - -1. Download `Source code (zip)` from the [latest release](https://github.com/Significant-Gravitas/AutoGPT/releases/latest) -2. Extract the zip-file into a folder - - ### Configuration 1. Find the file named `.env.template` in the main `Auto-GPT` folder. This file may @@ -159,10 +101,7 @@ Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](htt [openai-python docs]: https://github.com/openai/openai-python#microsoft-azure-endpoints [Azure OpenAI docs]: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line - -## Running AutoGPT - -### Run with Docker +## Running AutoGPT In Docker Easiest is to use `docker compose`. @@ -195,7 +134,7 @@ Once you have a recent version of Docker Compose, run the commands below in your want this, comment or remove the `depends: - redis` and `redis:` sections from `docker-compose.yml`. - For related settings, see [Memory > Redis setup](./configuration/memory.md#redis-setup). + For related settings, see [Memory > Redis setup](../configuration/memory.md) You can pass extra arguments, e.g. running with `--gpt3only` and `--continuous`: @@ -211,47 +150,4 @@ docker run -it --env-file=.env -v $PWD:/app auto-gpt docker run -it --env-file=.env -v $PWD:/app --rm auto-gpt --gpt3only --continuous ``` -[Docker Compose file]: https://github.com/Significant-Gravitas/AutoGPT/blob/master/autogpts/autogpt/docker-compose.yml - - -### Run with Dev Container - -1. Install the [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension in VS Code. - -2. Open command palette with ++f1++ and type `Dev Containers: Open Folder in Container`. - -3. Run `./run.sh`. - - -### Run without Docker - -#### Create a Virtual Environment - -Create a virtual environment to run in. - -```shell -python -m venv .venv -source .venv/bin/activate -pip3 install --upgrade pip -``` - -!!! warning - Due to security reasons, certain features (like Python execution) will by default be disabled when running without docker. So, even if you want to run the program outside a docker container, you currently still need docker to actually run scripts. - -Simply run the startup script in your terminal. This will install any necessary Python -packages and launch AutoGPT. - -- On Linux/MacOS: - - ```shell - ./run.sh - ``` - -- On Windows: - - ```shell - .\run.bat - ``` - -If this gives errors, make sure you have a compatible Python version installed. See also -the [requirements](./installation.md#requirements). +[Docker Compose file]: https://github.com/Significant-Gravitas/AutoGPT/blob/stable/docker-compose.yml \ No newline at end of file diff --git a/docs/content/AutoGPT/Setups/Git-setup.md b/docs/content/AutoGPT/Setups/Git-setup.md new file mode 100644 index 000000000000..230f38ec4a5e --- /dev/null +++ b/docs/content/AutoGPT/Setups/Git-setup.md @@ -0,0 +1,81 @@ +### Set up with Git + +!!! important + Make sure you have [Git](https://git-scm.com/downloads) installed for your OS. + +!!! info "Executing commands" + To execute the given commands, open a CMD, Bash, or Powershell window. + On Windows: press ++win+x++ and pick *Terminal*, or ++win+r++ and enter `cmd` + +1. 
Clone the repository

    ```shell
    git clone -b stable https://github.com/Significant-Gravitas/AutoGPT.git
    ```

2. Navigate to the directory where you downloaded the repository

    ```shell
    cd AutoGPT/autogpts/autogpt
    ```

### Configuration

1. Find the file named `.env.template` in the main `Auto-GPT` folder. This file may
    be hidden by default in some operating systems due to the dot prefix. To reveal
    hidden files, follow the instructions for your specific operating system:
    [Windows][show hidden files/Windows] and [macOS][show hidden files/macOS].
2. Create a copy of `.env.template` and call it `.env`;
    if you're already in a command prompt/terminal window:
    ```shell
    cp .env.template .env
    ```
3. Open the `.env` file in a text editor.
4. Find the line that says `OPENAI_API_KEY=`.
5. Insert your OpenAI API Key directly after = without quotes or spaces.
    ```yaml
    OPENAI_API_KEY=sk-qwertykeys123456
    ```
6. Enter any other API keys or tokens for services you would like to use.

    !!! note
        To activate and adjust a setting, remove the `# ` prefix.

7. Save and close the `.env` file.

!!! info "Using a GPT Azure-instance"
    If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and
    make an Azure configuration file:

    - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section:
        - `fast_llm_deployment_id`: your gpt-3.5-turbo or gpt-4 deployment ID
        - `smart_llm_deployment_id`: your gpt-4 deployment ID
        - `embedding_model_deployment_id`: your text-embedding-ada-002 v2 deployment ID

    Example:

    ```yaml
    # Please specify all of these values as double-quoted strings
    # Replace string in angled brackets (<>) to your own deployment Name
    azure_model_map:
        fast_llm_deployment_id: ""
        ...
    ```

    Details can be found in the [openai-python docs], and in the [Azure OpenAI docs] for the embedding model.
    If you're on Windows you may need to install an [MSVC library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).

[show hidden files/Windows]: https://support.microsoft.com/en-us/windows/view-hidden-files-and-folders-in-windows-97fbc472-c603-9d90-91d0-1166d1d9f4b5
[show hidden files/macOS]: https://www.pcmag.com/how-to/how-to-access-your-macs-hidden-files
[openai-python docs]: https://github.com/openai/openai-python#microsoft-azure-endpoints
[Azure OpenAI docs]: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line

## Running AutoGPT

### Run with Dev Container

1. Install the [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension in VS Code.

2. Open command palette with ++f1++ and type `Dev Containers: Open Folder in Container`.

3. Run `./run.sh`.
\ No newline at end of file
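As a quick illustration of the `.env` workflow described in the new Git setup page above: a minimal sketch of how such a file is typically loaded at startup, assuming the `python-dotenv` package; everything beyond the `OPENAI_API_KEY` name is illustrative.

```python
import os

from dotenv import load_dotenv  # provided by the python-dotenv package

# Read key/value pairs from .env into the process environment.
# Variables that are already set in the environment take precedence.
load_dotenv()

openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    raise SystemExit("OPENAI_API_KEY is not set; see the configuration steps above.")
```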
diff --git a/docs/content/AutoGPT/Setups/nogit-setup.md b/docs/content/AutoGPT/Setups/nogit-setup.md
new file mode 100644
index 000000000000..e5464eee88e8
--- /dev/null
+++ b/docs/content/AutoGPT/Setups/nogit-setup.md
@@ -0,0 +1,85 @@
+### Setting Up without Git/Docker
+
+!!! warning
+    We recommend using Git or Docker to make updating easier. Also note that some features such as Python execution will only work inside docker for security reasons.
+
+1. Download `Source code (zip)` from the [latest stable release](https://github.com/Significant-Gravitas/AutoGPT/releases/latest)
+2. Extract the zip-file into a folder
+
+### Configuration
+
+1. Find the file named `.env.template` in the main `Auto-GPT` folder. This file may
+    be hidden by default in some operating systems due to the dot prefix. To reveal
+    hidden files, follow the instructions for your specific operating system:
+    [Windows][show hidden files/Windows], [macOS][show hidden files/macOS].
+2. Create a copy of `.env.template` and call it `.env`;
+    if you're already in a command prompt/terminal window: `cp .env.template .env`.
+3. Open the `.env` file in a text editor.
+4. Find the line that says `OPENAI_API_KEY=`.
+5. After the `=`, enter your unique OpenAI API Key *without any quotes or spaces*.
+6. Enter any other API keys or tokens for services you would like to use.
+
+    !!! note
+        To activate and adjust a setting, remove the `# ` prefix.
+
+7. Save and close the `.env` file.
+
+!!! info "Using a GPT Azure-instance"
+    If you want to use GPT on an Azure instance, set `USE_AZURE` to `True` and
+    make an Azure configuration file:
+
+    - Rename `azure.yaml.template` to `azure.yaml` and provide the relevant `azure_api_base`, `azure_api_version` and all the deployment IDs for the relevant models in the `azure_model_map` section:
+        - `fast_llm_deployment_id`: your gpt-3.5-turbo or gpt-4 deployment ID
+        - `smart_llm_deployment_id`: your gpt-4 deployment ID
+        - `embedding_model_deployment_id`: your text-embedding-ada-002 v2 deployment ID
+
+    Example:
+
+    ```yaml
+    # Please specify all of these values as double-quoted strings
+    # Replace string in angled brackets (<>) to your own deployment Name
+    azure_model_map:
+        fast_llm_deployment_id: ""
+        ...
+    ```
+
+    Details can be found in the [openai-python docs], and in the [Azure OpenAI docs] for the embedding model.
+    If you're on Windows you may need to install an [MSVC library](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170).
+
+[show hidden files/Windows]: https://support.microsoft.com/en-us/windows/view-hidden-files-and-folders-in-windows-97fbc472-c603-9d90-91d0-1166d1d9f4b5
+[show hidden files/macOS]: https://www.pcmag.com/how-to/how-to-access-your-macs-hidden-files
+[openai-python docs]: https://github.com/openai/openai-python#microsoft-azure-endpoints
+[Azure OpenAI docs]: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/tutorials/embeddings?tabs=command-line
+
+### Start AutoGPT In A Virtual Environment
+
+First we need to create a virtual environment to run in.
+
+```shell
+python -m venv .venv
+source .venv/bin/activate
+pip3 install --upgrade pip
+```
+
+!!! warning
+    Due to security reasons, certain features (like Python execution) will by default be disabled when running without docker. So, even if you want to run the program outside a docker container, you currently still need docker to actually run scripts.
+
+Simply run the startup script in your terminal. This will install any necessary Python
+packages and launch AutoGPT.
+
+- On Linux/MacOS:
+
+    ```shell
+    ./run.sh
+    ```
+
+- On Windows:
+
+    ```shell
+    .\run.bat
+    ```
+
+If this gives errors, make sure you have a compatible Python version installed. See also
+the [requirements](./installation.md#requirements).
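To make the Python version requirement above concrete, here is a rough sketch of the kind of guard the startup scripts effectively apply before launching; the actual checks live in `./run.sh` and `.\run.bat` and may differ.

```python
import sys

MIN_PYTHON = (3, 10)  # AutoGPT requires Python 3.10 or later

# Abort early with a readable message instead of failing later on
# syntax or dependency errors caused by an old interpreter.
if sys.version_info < MIN_PYTHON:
    raise SystemExit(
        f"Python {'.'.join(map(str, MIN_PYTHON))}+ is required, "
        f"but this is {sys.version.split()[0]}."
    )
print("Python version OK, continuing startup.")
```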
diff --git a/docs/content/configuration/imagegen.md b/docs/content/AutoGPT/configuration/imagegen.md
similarity index 100%
rename from docs/content/configuration/imagegen.md
rename to docs/content/AutoGPT/configuration/imagegen.md
diff --git a/docs/content/configuration/memory.md b/docs/content/AutoGPT/configuration/memory.md
similarity index 100%
rename from docs/content/configuration/memory.md
rename to docs/content/AutoGPT/configuration/memory.md
diff --git a/docs/content/configuration/options.md b/docs/content/AutoGPT/configuration/options.md
similarity index 100%
rename from docs/content/configuration/options.md
rename to docs/content/AutoGPT/configuration/options.md
diff --git a/docs/content/configuration/search.md b/docs/content/AutoGPT/configuration/search.md
similarity index 100%
rename from docs/content/configuration/search.md
rename to docs/content/AutoGPT/configuration/search.md
diff --git a/docs/content/configuration/voice.md b/docs/content/AutoGPT/configuration/voice.md
similarity index 100%
rename from docs/content/configuration/voice.md
rename to docs/content/AutoGPT/configuration/voice.md
diff --git a/docs/content/plugins.md b/docs/content/AutoGPT/plugins.md
similarity index 100%
rename from docs/content/plugins.md
rename to docs/content/AutoGPT/plugins.md
diff --git a/docs/content/AutoGPT/setup.md b/docs/content/AutoGPT/setup.md
new file mode 100644
index 000000000000..1d58cfc304cd
--- /dev/null
+++ b/docs/content/AutoGPT/setup.md
@@ -0,0 +1,49 @@
+# Setting up AutoGPT
+
+## 📋 Requirements
+
+Choose an environment to run AutoGPT in (pick one):
+
+  - [Docker](https://docs.docker.com/get-docker/) (*recommended*)
+  - Python 3.10 or later (instructions: [for Windows](https://www.tutorialspoint.com/how-to-install-python-in-windows))
+  - [VSCode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
+
+
+## 🗝️ Getting an API key
+
+Get your OpenAI API key from: [https://platform.openai.com/account/api-keys](https://platform.openai.com/account/api-keys).
+
+!!! attention
+    To use the OpenAI API with AutoGPT, we strongly recommend **setting up billing**
+    (AKA paid account). Free accounts are [limited][openai/api limits] to 3 API calls per
+    minute, which can cause the application to crash.
+
+    You can set up a paid account at [Manage account > Billing > Overview](https://platform.openai.com/account/billing/overview).
+
+[openai/api limits]: https://platform.openai.com/docs/guides/rate-limits/overview#:~:text=Free%20trial%20users,RPM%0A40%2C000%20TPM
+
+!!! important
+    It's highly recommended that you keep track of your API costs on [the Usage page](https://platform.openai.com/account/usage).
+    You can also set limits on how much you spend on [the Usage limits page](https://platform.openai.com/account/billing/limits).
+
+![For OpenAI API key to work, set up paid account at OpenAI API > Billing](/imgs/openai-api-key-billing-paid-account.png)
+
+
+## Setting up AutoGPT
+
+### If you plan to use Docker, please follow this setup.
+
+!!! important "Docker Setup Issue"
+    We are addressing a known issue with the Docker setup related to Poetry.
+
+    [**We have an open PR if you'd like to take a look**](https://github.com/python-poetry/poetry/issues/8548)
+
+    Please keep this in mind. We apologize for any inconvenience, and thank you for your patience.
+
+[Docker Install Here.](Setups/Docker-setup.md)
+
+### If you plan to use Git, please follow this setup.
+### If you plan to use Git, please follow this setup.
+
+[Git Setup Here.](Setups/Git-setup.md)
+
+### If you don't want to use Git or Docker for the setup, follow here.
+[No Git Or Docker Setup Here.](Setups/nogit-setup.md)
\ No newline at end of file
diff --git a/docs/content/share-your-logs.md b/docs/content/AutoGPT/share-your-logs.md
similarity index 100%
rename from docs/content/share-your-logs.md
rename to docs/content/AutoGPT/share-your-logs.md
diff --git a/docs/content/testing.md b/docs/content/AutoGPT/testing.md
similarity index 100%
rename from docs/content/testing.md
rename to docs/content/AutoGPT/testing.md
diff --git a/docs/content/usage.md b/docs/content/AutoGPT/usage.md
similarity index 100%
rename from docs/content/usage.md
rename to docs/content/AutoGPT/usage.md
diff --git a/docs/content/benchmarks/benchmark.md b/docs/content/benchmarks/benchmark.md
new file mode 100644
index 000000000000..b51f03c6f82d
--- /dev/null
+++ b/docs/content/benchmarks/benchmark.md
@@ -0,0 +1,25 @@
+# Auto-GPT Benchmarks
+
+Built to benchmark the performance of agents, regardless of how they work.
+
+Objectively know how well your agent is performing in categories like code, retrieval, memory, and safety.
+
+Save time and money while doing it, thanks to smart dependencies. The best part? It's all automated.
+
+## Scores:
+
+*(Screenshot: overall benchmark scores, 2023-07-25)*
+
+## Ranking overall:
+
+1. [Beebot](https://github.com/AutoPackAI/beebot)
+2. [mini-agi](https://github.com/muellerberndt/mini-agi)
+3. [Auto-GPT](https://github.com/Significant-Gravitas/AutoGPT)
+
+## Detailed results:
+
+*(Screenshot: detailed benchmark results, 2023-07-25)*
+
+[Click here to see the results and the raw data!](https://docs.google.com/spreadsheets/d/1WXm16P2AHNbKpkOI0LYBpcsGG0O7D8HYTG5Uj0PaJjA/edit#gid=203558751)
+
+More agents coming soon!
diff --git a/docs/content/challenges/introduction.md b/docs/content/challenges/introduction.md
index 5f8e8d9ea3c6..4da7d4fe0708 100644
--- a/docs/content/challenges/introduction.md
+++ b/docs/content/challenges/introduction.md
@@ -1,4 +1,3 @@
-introduction.md
 # Introduction to Challenges
 
 Welcome to the AutoGPT Challenges page! This is a space where we encourage community members to collaborate and contribute towards improving AutoGPT by identifying and solving challenges that AutoGPT is not yet able to achieve.
diff --git a/docs/content/forge/get-started.md b/docs/content/forge/get-started.md
new file mode 100644
index 000000000000..2ea8c433bc05
--- /dev/null
+++ b/docs/content/forge/get-started.md
@@ -0,0 +1,31 @@
+# **AutoGPT-Forge**: Build Your Own AutoGPT Agent!
+
+### 🌌 Dive into the Universe of AutoGPT Creation! 🌌
+
+Ever dreamt of becoming the genius behind an AI agent? Dive into the *Forge*, where **you** become the creator!
+
+---
+
+### 🛠️ **Why AutoGPT-Forge?**
+- 💤 **No More Boilerplate!** - Don't let the mundane tasks stop you. Fork and build without the headache of starting from scratch!
+- 🧠 **Brain-centric Development!** - All the tools you need so you can spend 100% of your time on what matters - crafting the brain of your AI!
+- 🛠️ **Tooling ecosystem!** - We work with best-in-class tools to bring you the best experience possible!
+---
+
+### 🚀 **Get Started!**
+
+The getting-started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) will guide you through the process of setting up your project, all the way through to building a generalist agent.
+
+1. [AutoGPT Forge: A Comprehensive Guide to Your First Steps](https://aiedge.medium.com/autogpt-forge-a-comprehensive-guide-to-your-first-steps-a1dfdf46e3b4)
+2. [AutoGPT Forge: The Blueprint of an AI Agent](https://aiedge.medium.com/autogpt-forge-the-blueprint-of-an-ai-agent-75cd72ffde6)
+3. [AutoGPT Forge: Interacting with your Agent](https://aiedge.medium.com/autogpt-forge-interacting-with-your-agent-1214561b06b)
+4. [AutoGPT Forge: Crafting Intelligent Agent Logic](https://medium.com/@aiedge/autogpt-forge-crafting-intelligent-agent-logic-bc5197b14cb4)
+
+Coming soon:
+
+5. Interacting with and Benchmarking your Agent
+6. Abilities
+7. The Planning Loop
+8. Memories
diff --git a/docs/content/front-end/the-ui.md b/docs/content/front-end/the-ui.md
new file mode 100644
index 000000000000..e3b6e8a9fbc3
--- /dev/null
+++ b/docs/content/front-end/the-ui.md
@@ -0,0 +1,62 @@
+# AutoGPT Flutter Client
+
+## Description
+
+This repository contains the Flutter client for the AutoGPT project. The application lets users discuss various tasks with a single agent. The app is built to be cross-platform and runs on Web, Android, iOS, Windows, and Mac.
+
+## Features
+
+- List and manage multiple tasks.
+- Engage in chat conversations related to selected tasks.
+
+## Design document
+
+The design document for this project provides a detailed outline of the architecture, components, and other important aspects of this application. Please note that this is a living, growing document and it is subject to change as the project evolves.
+
+You can access the design document [here](https://docs.google.com/document/d/1S-o2np1gq5JwFq40wPHDUVLi-mylz4WMvCB8psOUjc8/).
+
+## Requirements
+
+- Flutter 3.x
+- Dart 3.x
+
+Flutter comes with Dart. To install Flutter, follow the instructions here: https://docs.flutter.dev/get-started/install
+
+## Installation
+
+1. **Clone the repo:**
+```shell
+git clone https://github.com/Significant-Gravitas/AutoGPT.git
+```
+
+2. **Navigate to the project directory:**
+```shell
+cd AutoGPT/frontend
+```
+
+3. **Get Flutter packages:**
+```shell
+flutter pub get
+```
+
+4. **Run the app:**
+```shell
+flutter run -d chrome --web-port 5000
+```
+
+## Project Structure
+
+- `lib/`: Contains the main source code for the application.
+- `models/`: Data models that define the structure of the objects used in the app.
+- `views/`: The UI components of the application.
+- `viewmodels/`: The business logic and data handling for the views.
+- `services/`: Contains the service classes that handle communication with backend APIs and other external data sources. These services are used to fetch and update data that the app uses, and they are consumed by the ViewModels.
+- `test/`: Contains the test files for unit and widget tests.
+
+## Responsive Design
+
+The app features a responsive design that adapts to different screen sizes and orientations. On larger screens (Web, Windows, Mac), views are displayed side by side horizontally. On smaller screens (Android, iOS), views are displayed in a tab bar controller layout.
+
+## License
+
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
diff --git a/docs/content/index.md b/docs/content/index.md
index 0f32a9f47369..16cfc91a1c8c 100644
--- a/docs/content/index.md
+++ b/docs/content/index.md
@@ -1,8 +1,63 @@
 # AutoGPT docs
 
-Welcome to AutoGPT. Please follow the [Installation](/setup/) guide to get started.
+Welcome to the AutoGPT Docs.
-!!! note
-    It is recommended to use a virtual machine/container (docker) for tasks that require high security measures to prevent any potential harm to the main computer's system and data. If you are considering to use AutoGPT outside a virtualized/containerized environment, you are *strongly* advised to use a separate user account just for running AutoGPT. This is even more important if you are going to allow AutoGPT to write/execute scripts and run shell commands!
+The AutoGPT project has been divided into four distinct sections: [Agents](#welcome-to-the-heart-of-autogpt), [Benchmarks](#autogpt-benchmarks), [Forge](#autogpt-forge), and [Frontend](#frontend).
 
-It is for these reasons that executing python scripts is explicitly disabled when running outside a container environment.
+---
+
+## [Welcome to the heart of AutoGPT](AutoGPT/setup.md)
+
+Welcome to the heart of AutoGPT, the project that kicked it all off: a semi-autonomous agent powered by LLMs to execute any task for you*.
+
+We continue to develop this project with the goal of providing access to AI assistance to the masses, and building the future transparently and together.
+
+- 💡 **Explore** - See what AI can do and be inspired by a glimpse of the future.
+
+- 🚀 **Build with us** - We welcome any input, whether it's code or ideas for new features or improvements! Join us on [Discord](https://discord.gg/autogpt) and find out how you can join in on the action.
+
+\* it isn't quite there yet, but that is the ultimate goal that we are still pursuing
+
+---
+
+## [AutoGPT-Benchmarks](benchmarks/benchmark.md)
+
+Test to impress with AutoGPT Benchmarks! Our benchmarking system offers a stringent testing environment to evaluate your agents objectively.
+
+- 📈 **Top Performance** - Among our currently benchmarked agents, AutoGPT consistently scores the best. The top-performing generalist agent will earn its position as the primary AutoGPT post-hackathon.
+
+- 🔌 **Agent Protocol Standardization** - AutoGPT uses the agent protocol from the AI Engineer Foundation to ensure seamless compatibility.
+
+---
+
+## [AutoGPT-Forge](forge/get-started.md)
+
+Forge your future with AutoGPT! The Forge is your innovation lab.
+
+- 🏗️ **Building with Ease** - We've set the groundwork so you can focus on your agent's personality and capabilities. Comprehensive tutorials are available [here](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec).
+
+---
+
+## [Frontend](front-end/the-ui.md)
+
+Harness your AI journey with the AutoGPT FrontEnd.
+
+- 🎮 **User-Friendly Interface** - Manage your agents effortlessly.
+
+- 🔄 **Seamless Integration** - Smooth connectivity between your agent and our benchmarking system.
+
+---
+
+[Join the AutoGPT Discord server for any queries](https://discord.gg/autogpt)
+
+### Glossary of Terms
+
+- **Repository**: Space where your project resides.
+- **Forking**: Copying a repository under your account.
+- **Cloning**: Making a local copy of a repository.
+- **Agent**: The AutoGPT you'll create and develop.
+- **Benchmarking**: Testing your agent's skills in the Forge.
+- **Forge**: The template for building your AutoGPT agent.
+- **Frontend**: The UI for tasks, logs, and task history.
+
+---
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 3c73b5c9a6cd..b246c387789f 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -4,21 +4,35 @@ repo_url: https://github.com/Significant-Gravitas/AutoGPT
 docs_dir: content
 nav:
   - Home: index.md
-  - Setup: setup.md
-  - Usage: usage.md
-  - Plugins: plugins.md
-  - Configuration:
-      - Options: configuration/options.md
-      - Search: configuration/search.md
-      - Memory: configuration/memory.md
-      - Voice: configuration/voice.md
-      - Image Generation: configuration/imagegen.md
-  - Help us improve AutoGPT:
-      - Share your debug logs with us: share-your-logs.md
-      - Contribution guide: contributing.md
-      - Running tests: testing.md
-      - Code of Conduct: code-of-conduct.md
+  - AutoGPT Agent:
+      - Agent-setup:
+          - Setting up AutoGPT: AutoGPT/setup.md
+          - Docker-setup: AutoGPT/Setups/Docker-setup.md
+          - Git-setup: AutoGPT/Setups/Git-setup.md
+          - No Git/Docker-setup: AutoGPT/Setups/nogit-setup.md
+      - Usage: AutoGPT/usage.md
+      - Plugins: AutoGPT/plugins.md
+      - Configuration:
+          - Options: AutoGPT/configuration/options.md
+          - Search: AutoGPT/configuration/search.md
+          - Memory: AutoGPT/configuration/memory.md
+          - Voice: AutoGPT/configuration/voice.md
+          - Image Generation: AutoGPT/configuration/imagegen.md
+      - Help us improve AutoGPT:
+          - Share your debug logs with us: AutoGPT/share-your-logs.md
+          - Contribution guide: contributing.md
+          - Running tests: AutoGPT/testing.md
+          - Code of Conduct: code-of-conduct.md
+
+  - AutoGPT-Benchmarks:
+      - Benchmarks: benchmarks/benchmark.md
+
+  - AutoGPT-Forge:
+      - Forge-setup: forge/get-started.md
+
+  - Frontend:
+      - Flutter-setup: front-end/the-ui.md
 
   - Challenges:
       - Introduction: challenges/introduction.md
@@ -40,6 +54,7 @@ nav:
 
 theme:
   name: material
+  custom_dir: overrides
   icon:
     logo: material/book-open-variant
   favicon: favicon.png
@@ -114,4 +129,4 @@ extra_javascript:
   - _javascript/tablesort.js
   - _javascript/mathjax.js
   - https://polyfill.io/v3/polyfill.min.js?features=es6
-  - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
+  - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
\ No newline at end of file
diff --git a/docs/overrides/main.html b/docs/overrides/main.html
new file mode 100644
index 000000000000..b376724b8b62
--- /dev/null
+++ b/docs/overrides/main.html
@@ -0,0 +1,61 @@
+{% extends "base.html" %}
+
+
+{% block extrahead %}
+
+
+
+
+
+{% raw %}
+
+{% endraw %}
+{% endblock %}
\ No newline at end of file