Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(rnd): add FastAPI support to existing project outline #7165

Merged
merged 19 commits into from
Jun 3, 2024
Merged
Show file tree
Hide file tree
Changes from 15 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions rnd/autogpt_server/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Next Gen AutoGPT

This is a research project into creating the next generation of AutoGPT: an AutoGPT agent server.

The agent server will enable the creation of composite multi-agent systems that utilize the AutoGPT Agent as their default agent.


## Project Outline

Currently, the project mainly consists of these components:

*agent_api*
A component that will expose API endpoints for the creation & execution of agents.
This component will make connections to the database to persist and read the agents.
It will also trigger the agent execution by pushing its execution request to the ExecutionQueue.

*agent_executor*
A component that will execute the agents.
This component will be a pool of processes/threads that will consume the ExecutionQueue and execute the agent accordingly.
The result and progress of its execution will be persisted in the database.
1 change: 1 addition & 0 deletions rnd/autogpt_server/autogpt_server/agent_api/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .server import start_server # noqa
majdyz marked this conversation as resolved.
Show resolved Hide resolved
29 changes: 29 additions & 0 deletions rnd/autogpt_server/autogpt_server/agent_api/server.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import uvicorn
from fastapi import FastAPI

from autogpt_server.data import ExecutionQueue

# Module-level FastAPI application; routes below register against this instance.
app = FastAPI(
    title="AutoGPT Agent Server",
    description=(
        "This server is used to execute agents that are created by the AutoGPT system."
    ),
    summary="AutoGPT Agent Server",
    version="0.1",
)

# Process-wide execution queue. It is None until start_server() binds it,
# so the annotation must admit None.
execution_queue: ExecutionQueue | None = None


@app.post("/agents/{agent_id}/execute")
def execute_agent(agent_id: str):
    """Queue an execution request for *agent_id* and echo back the ids.

    The actual work happens asynchronously in the executor process; this
    endpoint only enqueues and returns the generated execution id.
    """
    new_execution_id = execution_queue.add(agent_id)
    return {"execution_id": new_execution_id, "agent_id": agent_id}


def start_server(queue: ExecutionQueue, use_uvicorn: bool = True):
    """Bind *queue* as the process-wide execution queue and serve the app.

    With use_uvicorn=True this call blocks while serving requests; with
    False it just returns the configured FastAPI app (e.g. for TestClient).
    """
    global execution_queue
    execution_queue = queue
    if not use_uvicorn:
        return app
    uvicorn.run(app)
    return app
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .executor import start_executor # noqa
35 changes: 35 additions & 0 deletions rnd/autogpt_server/autogpt_server/agent_executor/executor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import logging
import time

from multiprocessing import Process
from concurrent.futures import ThreadPoolExecutor

from autogpt_server.data import ExecutionQueue

logger = logging.getLogger(__name__)


# TODO: Replace this with an actual Agent Execution.
def __execute(execution_id: str, data: str) -> None:
    """Placeholder worker: log simulated progress for one execution.

    Parameter renamed from ``id`` to avoid shadowing the builtin, and the
    log calls use lazy %-style arguments so the message is only formatted
    when the record is actually emitted.
    """
    logger.warning(
        "Executor processing started, execution_id: %s, data: %s",
        execution_id,
        data,
    )
    for step in range(5):
        logger.warning(
            "Executor processing step %d, execution_id: %s, data: %s",
            step,
            execution_id,
            data,
        )
        time.sleep(1)
    logger.warning(
        "Executor processing completed, execution_id: %s, data: %s",
        execution_id,
        data,
    )


def __start_executor(pool_size: int, queue: ExecutionQueue) -> None:
    """Consume *queue* forever, dispatching each execution to a thread pool."""
    with ThreadPoolExecutor(max_workers=pool_size) as pool:
        while True:
            job = queue.get()
            if job is None:
                # Nothing queued yet; back off briefly before polling again.
                time.sleep(1)
                continue
            pool.submit(__execute, job.execution_id, job.data)


def start_executor(pool_size: int, queue: ExecutionQueue) -> Process:
    """Start a background process that consumes *queue* with *pool_size* workers.

    The process is started as a daemon: its consumer loop never returns, so a
    non-daemon process would keep the interpreter (server or test run) alive
    forever after the parent finishes.

    Returns the started Process so callers can join/terminate it if needed
    (previously the handle was discarded and the return value was None, so
    existing callers that ignore it are unaffected).
    """
    executor_process = Process(
        target=__start_executor,
        args=(pool_size, queue),
        daemon=True,
    )
    executor_process.start()
    return executor_process
11 changes: 11 additions & 0 deletions rnd/autogpt_server/autogpt_server/app.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
from autogpt_server.agent_api import start_server
from autogpt_server.agent_executor import start_executor
from autogpt_server.data import ExecutionQueue

def main() -> None:
    """Entry point: wire the executor pool to the API server via one shared queue."""
    shared_queue = ExecutionQueue()
    start_executor(5, shared_queue)
    start_server(shared_queue)


if __name__ == "__main__":
    main()
36 changes: 36 additions & 0 deletions rnd/autogpt_server/autogpt_server/data.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
import uuid
from multiprocessing import Queue
from queue import Empty


class Execution:
"""Data model for an execution of an Agent"""

def __init__(self, execution_id: str, data: str):
self.execution_id = execution_id
self.data = data


# TODO: This shared class make api & executor coupled in one machine.
# Replace this with a persistent & remote-hosted queue.
# One very likely candidate would be persisted Redis (Redis Queue).
# It will also open the possibility of using it for other purposes like
# caching, execution engine broker (like Celery), user session management etc.
class ExecutionQueue:
"""
Queue for managing the execution of agents.
This will be shared between different processes
"""

def __init__(self):
self.queue = Queue()

def add(self, data: str) -> str:
execution_id = uuid.uuid4()
self.queue.put(Execution(str(execution_id), data))
return str(execution_id)

def get(self) -> Execution | None:
return self.queue.get()

def empty(self) -> bool:
return self.queue.empty()

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
[tool.poetry]
name = "rnd"
name = "autogpt_server"
version = "0.1.0"
description = ""
authors = ["SwiftyOS <craigswift13@gmail.com>"]
majdyz marked this conversation as resolved.
Show resolved Hide resolved
Expand All @@ -9,6 +9,7 @@ readme = "README.md"
python = "^3.10"
click = "^8.1.7"
pydantic = "^2.7.1"
pytest = "^8.2.1"


[build-system]
Expand Down
3 changes: 3 additions & 0 deletions rnd/autogpt_server/run.sh
majdyz marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/bash

python3 -m autogpt_server.app
majdyz marked this conversation as resolved.
Show resolved Hide resolved
31 changes: 31 additions & 0 deletions rnd/autogpt_server/test/test_app.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import pytest

from autogpt_server.data import ExecutionQueue
from autogpt_server.agent_api import start_server
from autogpt_server.agent_executor import start_executor
from fastapi.testclient import TestClient

@pytest.fixture
def client():
    """TestClient bound to a fresh server/executor pair sharing one queue."""
    queue = ExecutionQueue()
    start_executor(5, queue)
    app = start_server(queue, use_uvicorn=False)
    return TestClient(app)


def test_execute_agent(client):
    """POSTing an execute request returns 200 with the agent id and a UUID."""
    response = client.post("/agents/dummy_agent_1/execute")
    assert response.status_code == 200

    payload = response.json()
    assert payload["agent_id"] == "dummy_agent_1"
    execution_id = payload["execution_id"]
    assert isinstance(execution_id, str)
    assert len(execution_id) == 36  # canonical str(uuid.uuid4()) length

    # TODO: Add assertion that the executor is executed after some time
    # Add this when db integration is done.


27 changes: 0 additions & 27 deletions rnd/nextgenautogpt/README.md

This file was deleted.

Empty file.
39 changes: 0 additions & 39 deletions rnd/nextgenautogpt/nextgenautogpt/__main__.py

This file was deleted.

Empty file.
Empty file.
15 changes: 0 additions & 15 deletions rnd/nextgenautogpt/nextgenautogpt/executor/executor.py

This file was deleted.

Empty file.
Loading
Loading