diff --git a/autogpts/autogpt/agbenchmark_config/challenges_already_beaten.json b/autogpts/autogpt/agbenchmark_config/challenges_already_beaten.json
new file mode 100644
index 00000000000..90df480341c
--- /dev/null
+++ b/autogpts/autogpt/agbenchmark_config/challenges_already_beaten.json
@@ -0,0 +1,10 @@
+{
+  "TestWriteFile": true,
+  "TestReadFile": true,
+  "TestSortCsv": true,
+  "TestAnswerQuestionSmallCsv": true,
+  "TestCombineCsv": true,
+  "TestLabelCsv": true,
+  "TestAnswerQuestionCsv": true,
+  "TestAnswerQuestionCombineCsv": false
+}
\ No newline at end of file
diff --git a/autogpts/autogpt/output.csv b/autogpts/autogpt/output.csv
new file mode 100644
index 00000000000..8afe84bf014
--- /dev/null
+++ b/autogpts/autogpt/output.csv
@@ -0,0 +1,4 @@
+Age,ID,Name,Occupation,Salary
+28,101,John,Engineer,80000
+34,102,Alice,Doctor,120000
+45,103,Bob,Lawyer,95000
diff --git a/rnd/autogpt_server/README.md b/rnd/autogpt_server/README.md
new file mode 100644
index 00000000000..69b8f0dce8e
--- /dev/null
+++ b/rnd/autogpt_server/README.md
@@ -0,0 +1,17 @@
+# Next Gen AutoGPT
+
+This is a research project into creating the next generation of AutoGPT: an AutoGPT agent server.
+
+It will come with the AutoGPT Agent as the default agent.
+
+
+## Project Outline
+
+The project consists of two main components:
+
+- `agent_server`: the API server for creating agents and triggering their execution.
+- `agent_executor`: responsible for executing the agents.
+
+
+
+
diff --git a/rnd/autogpt_server/autogpt_server/agent_api/__init__.py b/rnd/autogpt_server/autogpt_server/agent_api/__init__.py
new file mode 100644
index 00000000000..ce16e5f4abd
--- /dev/null
+++ b/rnd/autogpt_server/autogpt_server/agent_api/__init__.py
@@ -0,0 +1 @@
+from .server import start_server
diff --git a/rnd/autogpt_server/autogpt_server/agent_api/server.py b/rnd/autogpt_server/autogpt_server/agent_api/server.py
new file mode 100644
index 00000000000..3aa9b9648e9
--- /dev/null
+++ b/rnd/autogpt_server/autogpt_server/agent_api/server.py
@@ -0,0 +1,26 @@
+import uvicorn
+
+from fastapi import FastAPI
+
+from autogpt_server.data import ExecutionQueue
+
+app = FastAPI(
+    title="AutoGPT Agent Server",
+    description=(
+        "This server is used to execute agents that are created by the AutoGPT system."
+    ),
+    summary="AutoGPT Agent Server",
+    version="0.1",
+)
+
+execution_queue: ExecutionQueue | None = None
+
+@app.post("/agents/{agent_id}/execute")
+def execute_agent(agent_id: str):
+    execution_id = execution_queue.add(agent_id)
+    return {"execution_id": execution_id, "agent_id": agent_id}
+
+def start_server(queue: ExecutionQueue):
+    global execution_queue
+    execution_queue = queue
+    uvicorn.run(app)
diff --git a/rnd/autogpt_server/autogpt_server/agent_executor/__init__.py b/rnd/autogpt_server/autogpt_server/agent_executor/__init__.py
new file mode 100644
index 00000000000..f966553631f
--- /dev/null
+++ b/rnd/autogpt_server/autogpt_server/agent_executor/__init__.py
@@ -0,0 +1 @@
+from .executor import start_executor
diff --git a/rnd/autogpt_server/autogpt_server/agent_executor/executor.py b/rnd/autogpt_server/autogpt_server/agent_executor/executor.py
new file mode 100644
index 00000000000..b1ce0ea6ff4
--- /dev/null
+++ b/rnd/autogpt_server/autogpt_server/agent_executor/executor.py
@@ -0,0 +1,26 @@
+import logging
+import time
+
+from concurrent.futures import ThreadPoolExecutor
+
+from autogpt_server.data import ExecutionQueue
+
+logger = logging.getLogger(__name__)
+
+
+# TODO: Replace this with an actual agent execution.
+def __execute(id: str, data: str) -> None:
+    for i in range(5):
+        print(f"Executor processing step {i}, execution_id: {id}, data: {data}")
+        time.sleep(1)
+    print(f"Executor processing completed, execution_id: {id}, data: {data}")
+
+
+def start_executor(pool_size: int, queue: ExecutionQueue) -> None:
+    with ThreadPoolExecutor(max_workers=pool_size) as executor:
+        while True:
+            execution = queue.get()
+            if not execution:
+                time.sleep(1)
+                continue
+            executor.submit(__execute, execution.execution_id, execution.data)
diff --git a/rnd/autogpt_server/autogpt_server/app.py b/rnd/autogpt_server/autogpt_server/app.py
new file mode 100644
index 00000000000..fb52c83c79c
--- /dev/null
+++ b/rnd/autogpt_server/autogpt_server/app.py
@@ -0,0 +1,11 @@
+from multiprocessing import Process
+
+from autogpt_server.agent_api import start_server
+from autogpt_server.agent_executor import start_executor
+from autogpt_server.data import ExecutionQueue
+
+if __name__ == "__main__":
+    queue = ExecutionQueue()
+    executor_process = Process(target=start_executor, args=(5, queue))
+    executor_process.start()
+    start_server(queue)
diff --git a/rnd/autogpt_server/autogpt_server/data.py b/rnd/autogpt_server/autogpt_server/data.py
new file mode 100644
index 00000000000..ac24704dfa9
--- /dev/null
+++ b/rnd/autogpt_server/autogpt_server/data.py
@@ -0,0 +1,30 @@
+import uuid
+
+from multiprocessing import Queue
+from queue import Empty
+
+class Execution:
+    def __init__(self, execution_id: str, data: str):
+        self.execution_id = execution_id
+        self.data = data
+
+# TODO: Replace this with a persistent queue.
+class ExecutionQueue:
+    def __init__(self):
+        self.queue = Queue()
+
+    def add(self, data: str) -> str:
+        execution_id = uuid.uuid4()
+        self.queue.put(Execution(str(execution_id), data))
+        return str(execution_id)
+
+    def get(self) -> Execution | None:
+        # Non-blocking read: return None when the queue is empty so callers can idle.
+        try:
+            return self.queue.get(block=False)
+        except Empty:
+            return None
+
+    def empty(self) -> bool:
+        return self.queue.empty()
+
\ No newline at end of file
diff --git a/rnd/nextgenautogpt/poetry.lock b/rnd/autogpt_server/poetry.lock
similarity index 100%
rename from rnd/nextgenautogpt/poetry.lock
rename to rnd/autogpt_server/poetry.lock
diff --git a/rnd/nextgenautogpt/pyproject.toml b/rnd/autogpt_server/pyproject.toml
similarity index 92%
rename from rnd/nextgenautogpt/pyproject.toml
rename to rnd/autogpt_server/pyproject.toml
index 1b3d366befb..62b4e08f70e 100644
--- a/rnd/nextgenautogpt/pyproject.toml
+++ b/rnd/autogpt_server/pyproject.toml
@@ -1,5 +1,5 @@
 [tool.poetry]
-name = "rnd"
+name = "autogpt_server"
 version = "0.1.0"
 description = ""
 authors = ["SwiftyOS "]
diff --git a/rnd/autogpt_server/run.sh b/rnd/autogpt_server/run.sh
new file mode 100644
index 00000000000..420e7e848e1
--- /dev/null
+++ b/rnd/autogpt_server/run.sh
@@ -0,0 +1 @@
+python3 -m autogpt_server.app
diff --git a/rnd/nextgenautogpt/README.md b/rnd/nextgenautogpt/README.md
deleted file mode 100644
index 5104b2f44bc..00000000000
--- a/rnd/nextgenautogpt/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Next Gen AutoGPT
-
-This is a research project into creating the next generation of autogpt, which is an autogpt agent server.
-
-It will come with the AutoGPT Agent as the default agent
-
-
-## Project Outline
-
-```
-.
-├── READEME.md
-├── nextgenautogpt
-│   ├── __init__.py
-│   ├── __main__.py
-│   ├── cli.py # The CLI tool for running the system
-│   ├── executor # The Component Executor Process
-│   │   └── __init__.py
-│   ├── manager # The Agent Manager it manages a pool of executors and schedules components to run
-│   │   └── __init__.py
-│   └── server # The main application. It includes the api server and additional modules
-│       └── __init__.py
-└── pyproject.toml
-```
-
-
-
diff --git a/rnd/nextgenautogpt/nextgenautogpt/__init__.py b/rnd/nextgenautogpt/nextgenautogpt/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/rnd/nextgenautogpt/nextgenautogpt/__main__.py b/rnd/nextgenautogpt/nextgenautogpt/__main__.py
deleted file mode 100644
index d5295305852..00000000000
--- a/rnd/nextgenautogpt/nextgenautogpt/__main__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import multiprocessing as mp
-from typing import Any
-
-import nextgenautogpt.manager.manager as mod_manager
-import nextgenautogpt.server.server as mod_server
-
-
-def main() -> None:
-    # Create queues/pipes for communication
-    server_to_manager: mp.Queue[Any] = mp.Queue()
-    manager_to_server: mp.Queue[Any] = mp.Queue()
-
-    # Create and start server process
-    server: mp.Process = mp.Process(
-        target=mod_server.run_server,
-        args=(
-            server_to_manager,
-            manager_to_server,
-        ),
-    )
-    server.start()
-
-    # Create and start manager process
-    manager: mp.Process = mp.Process(
-        target=mod_manager.run_manager,
-        args=(
-            server_to_manager,
-            manager_to_server,
-        ),
-    )
-    manager.start()
-
-    server.join()
-    manager.join()
-
-
-if __name__ == "__main__":
-    mp.set_start_method("spawn")
-    main()
diff --git a/rnd/nextgenautogpt/nextgenautogpt/cli.py b/rnd/nextgenautogpt/nextgenautogpt/cli.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/rnd/nextgenautogpt/nextgenautogpt/executor/__init__.py b/rnd/nextgenautogpt/nextgenautogpt/executor/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/rnd/nextgenautogpt/nextgenautogpt/executor/executor.py b/rnd/nextgenautogpt/nextgenautogpt/executor/executor.py
deleted file mode 100644
index 73c63bb56bb..00000000000
--- a/rnd/nextgenautogpt/nextgenautogpt/executor/executor.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import multiprocessing as mp
-import time
-from typing import Any
-
-
-def run_executor(manager_to_executor: mp.Queue, executors_to_manager: mp.Queue) -> None:
-    # Each executor process will run this initializer
-    print("Executor process started")
-    while True:
-        if not manager_to_executor.empty():
-            task = manager_to_executor.get()
-            print(f"Executor processing: {task}")
-            executors_to_manager.put("Task completed")
-        # Simulate executor work
-        time.sleep(1)
diff --git a/rnd/nextgenautogpt/nextgenautogpt/manager/__init__.py b/rnd/nextgenautogpt/nextgenautogpt/manager/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/rnd/nextgenautogpt/nextgenautogpt/manager/manager.py b/rnd/nextgenautogpt/nextgenautogpt/manager/manager.py
deleted file mode 100644
index e18326e8cc2..00000000000
--- a/rnd/nextgenautogpt/nextgenautogpt/manager/manager.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import multiprocessing as mp
-import time
-
-import nextgenautogpt.executor.executor as mod_executor
-
-
-def run_manager(server_to_manager: mp.Queue, manager_to_server: mp.Queue) -> None:
-    # Create queue for communication between manager and executors
-    print("Manager process started")
-    manager_to_executor = mp.Queue()
-    executors_to_manager = mp.Queue()
-    # Create and start a pool of executor processes
-    with mp.Pool(
-        processes=5,
-        initializer=mod_executor.run_executor,
-        initargs=(
-            manager_to_executor,
-            executors_to_manager,
-        ),
-    ):
-        while True:
-            if not server_to_manager.empty():
-                message = server_to_manager.get()
-                print(f"Manager received: {message}")
-                manager_to_server.put("Manager: Received message from server")
message from server") - manager_to_executor.put("Task for executor") - # Simulate manager work - time.sleep(1) - if not executors_to_manager.empty(): - message = executors_to_manager.get() - print(f"Manager received: {message}") - # Simulate manager work - time.sleep(1) diff --git a/rnd/nextgenautogpt/nextgenautogpt/server/__init__.py b/rnd/nextgenautogpt/nextgenautogpt/server/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/rnd/nextgenautogpt/nextgenautogpt/server/server.py b/rnd/nextgenautogpt/nextgenautogpt/server/server.py deleted file mode 100644 index 23ead8ea2c9..00000000000 --- a/rnd/nextgenautogpt/nextgenautogpt/server/server.py +++ /dev/null @@ -1,17 +0,0 @@ -import multiprocessing as mp -import time -from typing import Any - - -def run_server(server_to_manager: mp.Queue, manager_to_server: mp.Queue) -> None: - print("Server process started") - while True: - message = "Message from server" - server_to_manager.put(message) - # Simulate server work - time.sleep(1) - if not manager_to_server.empty(): - message = manager_to_server.get() - print(f"Server received: {message}") - # Simulate server work - time.sleep(1)