Merge branch 'Significant-Gravitas:master' into a-slip-of-a-pen
cyrus-hawk committed Jul 31, 2023
2 parents 40faa27 + b7f1df3 commit f83b187
Showing 29 changed files with 381 additions and 258 deletions.
13 changes: 2 additions & 11 deletions agbenchmark/benchmarks.py
@@ -1,3 +1,4 @@
import os
import sys
from pathlib import Path
from typing import Tuple
@@ -25,7 +26,7 @@ def bootstrap_agent(task, continuous_mode) -> Agent:
config.continuous_mode = continuous_mode
config.temperature = 0
config.plain_output = True
command_registry = get_command_registry(config)
command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
config.memory_backend = "no_memory"
config.workspace_path = Workspace.init_workspace_directory(config)
config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
@@ -44,16 +45,6 @@ def bootstrap_agent(task, continuous_mode) -> Agent:
)


def get_command_registry(config: Config):
command_registry = CommandRegistry()
enabled_command_categories = [
x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
]
for command_category in enabled_command_categories:
command_registry.import_commands(command_category)
return command_registry


if __name__ == "__main__":
# The first argument is the script name itself, second is the task
if len(sys.argv) != 2:
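Both here and in autogpt/app/main.py (further down), the hand-rolled registry setup is replaced by a single CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config) call. The classmethod itself is not part of this diff; a minimal sketch of what it plausibly does, reconstructed from the code deleted at the two call sites, could look like this:

```python
# Sketch only -- reconstructed from the deleted get_command_registry() helper above
# and the deleted setup block in autogpt/app/main.py; the actual classmethod on
# CommandRegistry may differ.
class CommandRegistry:
    ...  # existing registry implementation (import_commands, unregister, commands, ...)

    @classmethod
    def with_command_modules(cls, modules: list[str], config) -> "CommandRegistry":
        new_registry = cls()

        # Skip categories disabled in the config
        enabled_categories = [
            x for x in modules if x not in config.disabled_command_categories
        ]
        for command_category in enabled_categories:
            new_registry.import_commands(command_category)

        # Drop commands that are incompatible with the current config
        for command in list(new_registry.commands.values()):
            if callable(command.enabled) and not command.enabled(config):
                command.enabled = False
                new_registry.unregister(command)

        return new_registry
```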
6 changes: 3 additions & 3 deletions autogpt/agents/agent.py
@@ -293,10 +293,10 @@ def execute_command(
# Handle non-native commands (e.g. from plugins)
for command in agent.ai_config.prompt_generator.commands:
if (
command_name == command["label"].lower()
or command_name == command["name"].lower()
command_name == command.label.lower()
or command_name == command.name.lower()
):
return command["function"](**arguments)
return command.function(**arguments)

raise RuntimeError(
f"Cannot execute '{command_name}': unknown command."
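The plugin-supplied commands exposed through agent.ai_config.prompt_generator.commands are evidently no longer plain dicts but objects with label, name, and function attributes. A hypothetical container matching that access pattern (the real class lives elsewhere in the codebase and may differ):

```python
from dataclasses import dataclass, field
from typing import Any, Callable


@dataclass
class PromptGeneratorCommand:
    """Hypothetical shape implied by the attribute access above."""

    label: str
    name: str
    function: Callable[..., Any]
    args: dict[str, str] = field(default_factory=dict)
```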
111 changes: 101 additions & 10 deletions autogpt/agents/base.py
@@ -1,7 +1,8 @@
from __future__ import annotations

import re
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Optional
from typing import TYPE_CHECKING, Any, Literal, Optional

if TYPE_CHECKING:
from autogpt.config import AIConfig, Config
@@ -23,6 +24,8 @@
class BaseAgent(metaclass=ABCMeta):
"""Base class for all Auto-GPT agents."""

ThoughtProcessID = Literal["one-shot"]

def __init__(
self,
ai_config: AIConfig,
@@ -91,6 +94,7 @@ def __init__(
def think(
self,
instruction: Optional[str] = None,
thought_process_id: ThoughtProcessID = "one-shot",
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Runs the agent for one cycle.
@@ -103,8 +107,8 @@ def think(

instruction = instruction or self.default_cycle_instruction

prompt: ChatSequence = self.construct_prompt(instruction)
prompt = self.on_before_think(prompt, instruction)
prompt: ChatSequence = self.construct_prompt(instruction, thought_process_id)
prompt = self.on_before_think(prompt, thought_process_id, instruction)
raw_response = create_chat_completion(
prompt,
self.config,
Expand All @@ -114,7 +118,7 @@ def think(
)
self.cycle_count += 1

return self.on_response(raw_response, prompt, instruction)
return self.on_response(raw_response, thought_process_id, prompt, instruction)

@abstractmethod
def execute(
@@ -137,6 +141,7 @@ def execute(

def construct_base_prompt(
self,
thought_process_id: ThoughtProcessID,
prepend_messages: list[Message] = [],
append_messages: list[Message] = [],
reserve_tokens: int = 0,
@@ -178,7 +183,11 @@ def construct_base_prompt(

return prompt

def construct_prompt(self, cycle_instruction: str) -> ChatSequence:
def construct_prompt(
self,
cycle_instruction: str,
thought_process_id: ThoughtProcessID,
) -> ChatSequence:
"""Constructs and returns a prompt with the following structure:
1. System prompt
2. Message history of the agent, truncated & prepended with running summary as needed
@@ -195,14 +204,86 @@ def construct_prompt(self, cycle_instruction: str) -> ChatSequence:
cycle_instruction_tlength = count_message_tokens(
cycle_instruction_msg, self.llm.name
)
prompt = self.construct_base_prompt(reserve_tokens=cycle_instruction_tlength)

append_messages: list[Message] = []

response_format_instr = self.response_format_instruction(thought_process_id)
if response_format_instr:
append_messages.append(Message("system", response_format_instr))

prompt = self.construct_base_prompt(
thought_process_id,
append_messages=append_messages,
reserve_tokens=cycle_instruction_tlength,
)

# ADD user input message ("triggering prompt")
prompt.append(cycle_instruction_msg)

return prompt

def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequence:
# This can be expanded to support multiple types of (inter)actions within an agent
def response_format_instruction(self, thought_process_id: ThoughtProcessID) -> str:
if thought_process_id != "one-shot":
raise NotImplementedError(f"Unknown thought process '{thought_process_id}'")

RESPONSE_FORMAT_WITH_COMMAND = """```ts
interface Response {
thoughts: {
// Thoughts
text: string;
reasoning: string;
// Short markdown-style bullet list that conveys the long-term plan
plan: string;
// Constructive self-criticism
criticism: string;
// Summary of thoughts to say to the user
speak: string;
};
command: {
name: string;
args: Record<string, any>;
};
}
```"""

RESPONSE_FORMAT_WITHOUT_COMMAND = """```ts
interface Response {
thoughts: {
// Thoughts
text: string;
reasoning: string;
// Short markdown-style bullet list that conveys the long-term plan
plan: string;
// Constructive self-criticism
criticism: string;
// Summary of thoughts to say to the user
speak: string;
};
}
```"""

response_format = re.sub(
r"\n\s+",
"\n",
RESPONSE_FORMAT_WITHOUT_COMMAND
if self.config.openai_functions
else RESPONSE_FORMAT_WITH_COMMAND,
)

use_functions = self.config.openai_functions and self.command_registry.commands
return (
f"Respond strictly with JSON{', and also specify a command to use through a function_call' if use_functions else ''}. "
"The JSON should be compatible with the TypeScript type `Response` from the following:\n"
f"{response_format}\n"
)
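Because of the re.sub above, the indentation inside the triple-quoted interface never reaches the model: every newline followed by a run of whitespace collapses to a bare newline. A standalone illustration of that behaviour (not code from the repository):

```python
import re

template = """interface Response {
    thoughts: {
        text: string;
    };
}"""

# Mirror the substitution in response_format_instruction(): a newline plus any
# run of whitespace becomes a single newline, flattening the indentation.
print(re.sub(r"\n\s+", "\n", template))
# interface Response {
# thoughts: {
# text: string;
# };
# }
```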

def on_before_think(
self,
prompt: ChatSequence,
thought_process_id: ThoughtProcessID,
instruction: str,
) -> ChatSequence:
"""Called after constructing the prompt but before executing it.
Calls the `on_planning` hook of any enabled and capable plugins, adding their
@@ -237,7 +318,11 @@ def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequence:
return prompt

def on_response(
self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
self,
llm_response: ChatModelResponse,
thought_process_id: ThoughtProcessID,
prompt: ChatSequence,
instruction: str,
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Called upon receiving a response from the chat model.
@@ -260,7 +345,9 @@ def on_response(
) # FIXME: support function calls

try:
return self.parse_and_process_response(llm_response, prompt, instruction)
return self.parse_and_process_response(
llm_response, thought_process_id, prompt, instruction
)
except SyntaxError as e:
logger.error(f"Response could not be parsed: {e}")
# TODO: tune this message
@@ -275,7 +362,11 @@ def on_response(

@abstractmethod
def parse_and_process_response(
self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
self,
llm_response: ChatModelResponse,
thought_process_id: ThoughtProcessID,
prompt: ChatSequence,
instruction: str,
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Validate, parse & process the LLM's response.
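Taken together, thought_process_id is now threaded through the whole pipeline in this file: think() → construct_prompt() → on_before_think() → on_response() → parse_and_process_response(). From a caller's point of view the new parameter is optional and currently admits only one value; a hedged usage sketch, where agent stands for any concrete BaseAgent subclass such as Agent:

```python
# Illustrative call only; "one-shot" is the sole ThoughtProcessID defined so far,
# and any other value hits the NotImplementedError in response_format_instruction().
command_name, command_args, thoughts = agent.think(
    instruction=None,               # falls back to agent.default_cycle_instruction
    thought_process_id="one-shot",  # also the default
)
```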
31 changes: 2 additions & 29 deletions autogpt/app/main.py
@@ -134,36 +134,9 @@ def run_auto_gpt(
config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)

config.plugins = scan_plugins(config, config.debug_mode)
# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry()

logger.debug(
f"The following command categories are disabled: {config.disabled_command_categories}"
)
enabled_command_categories = [
x for x in COMMAND_CATEGORIES if x not in config.disabled_command_categories
]

logger.debug(
f"The following command categories are enabled: {enabled_command_categories}"
)

for command_category in enabled_command_categories:
command_registry.import_commands(command_category)

# Unregister commands that are incompatible with the current config
incompatible_commands = []
for command in command_registry.commands.values():
if callable(command.enabled) and not command.enabled(config):
command.enabled = False
incompatible_commands.append(command)

for command in incompatible_commands:
command_registry.unregister(command)
logger.debug(
f"Unregistering incompatible command: {command.name}, "
f"reason - {command.disabled_reason or 'Disabled by current config.'}"
)
# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)

ai_config = construct_main_ai_config(
config,
1 change: 1 addition & 0 deletions autogpt/app/setup.py
@@ -83,6 +83,7 @@ def prompt_user(
"Falling back to manual mode.",
speak_text=True,
)
logger.debug(f"Error during AIConfig generation: {e}")

return generate_aiconfig_manual(config)

2 changes: 1 addition & 1 deletion autogpt/commands/__init__.py
@@ -3,5 +3,5 @@
"autogpt.commands.file_operations",
"autogpt.commands.web_search",
"autogpt.commands.web_selenium",
"autogpt.commands.task_statuses",
"autogpt.commands.system",
]
10 changes: 7 additions & 3 deletions autogpt/commands/execute_code.py
@@ -1,4 +1,8 @@
"""Execute code in a Docker container"""
"""Commands to execute code"""

COMMAND_CATEGORY = "execute_code"
COMMAND_CATEGORY_TITLE = "Execute Code"

import os
import subprocess
from pathlib import Path
@@ -251,9 +255,9 @@ def execute_shell(command_line: str, agent: Agent) -> str:
"execute_shell_popen",
"Executes a Shell Command, non-interactive commands only",
{
"query": {
"command_line": {
"type": "string",
"description": "The search query",
"description": "The command line to execute",
"required": True,
}
},
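Besides the new category constants, this file fixes the argument spec of execute_shell_popen, which previously advertised a query parameter (apparently copied from a search command) even though the function expects command_line. With the corrected spec, the arguments produced from the model's JSON line up with the Python signature, roughly as follows (illustrative only; the dispatch details are assumed):

```python
# Hypothetical invocation: `arguments` is what the LLM's JSON would supply under
# the corrected spec, and the keyword maps directly onto the function parameter.
arguments = {"command_line": "echo hello"}
result = execute_shell_popen(**arguments, agent=agent)  # agent: the running Agent
```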
22 changes: 5 additions & 17 deletions autogpt/commands/file_operations.py
@@ -1,6 +1,10 @@
"""File operations for AutoGPT"""
"""Commands to perform operations on files"""

from __future__ import annotations

COMMAND_CATEGORY = "file_operations"
COMMAND_CATEGORY_TITLE = "File Operations"

import contextlib
import hashlib
import os
@@ -228,22 +232,6 @@ def write_to_file(filename: str, text: str, agent: Agent) -> str:
return f"Error: {err}"


@command(
"append_to_file",
"Appends to a file",
{
"filename": {
"type": "string",
"description": "The name of the file to write to",
"required": True,
},
"text": {
"type": "string",
"description": "The text to write to the file",
"required": True,
},
},
)
@sanitize_path_arg("filename")
def append_to_file(
filename: str, text: str, agent: Agent, should_log: bool = True
5 changes: 4 additions & 1 deletion autogpt/commands/git_operations.py
@@ -1,4 +1,7 @@
"""Git operations for autogpt"""
"""Commands to perform Git operations"""

COMMAND_CATEGORY = "git_operations"
COMMAND_CATEGORY_TITLE = "Git Operations"

from git.repo import Repo

6 changes: 5 additions & 1 deletion autogpt/commands/image_gen.py
@@ -1,4 +1,8 @@
""" Image Generation Module for AutoGPT."""
"""Commands to generate images based on text input"""

COMMAND_CATEGORY = "text_to_image"
COMMAND_CATEGORY_TITLE = "Text to Image"

import io
import json
import time
autogpt/commands/{task_statuses.py → system.py}
@@ -1,6 +1,10 @@
"""Task Statuses module."""
"""Commands to control the internal state of the program"""

from __future__ import annotations

COMMAND_CATEGORY = "system"
COMMAND_CATEGORY_TITLE = "System"

from typing import NoReturn

from autogpt.agents.agent import Agent
6 changes: 5 additions & 1 deletion autogpt/commands/web_search.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
"""Google search command for Autogpt."""
"""Commands to search the web with"""

from __future__ import annotations

COMMAND_CATEGORY = "web_search"
COMMAND_CATEGORY_TITLE = "Web Search"

import json
import time
from itertools import islice
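All of the command modules touched in this commit now share the same header shape: a one-line docstring describing the category, followed by COMMAND_CATEGORY and COMMAND_CATEGORY_TITLE constants placed ahead of the imports. A minimal template of that pattern (placeholder names throughout; how the registry consumes the constants is assumed, not shown in this diff):

```python
"""Commands to <do something in this category>"""

# Module-level metadata, presumably read when CommandRegistry.with_command_modules
# imports the module (assumption; the registry side is not part of this diff).
COMMAND_CATEGORY = "example_category"        # placeholder
COMMAND_CATEGORY_TITLE = "Example Category"  # placeholder

from autogpt.agents.agent import Agent
from autogpt.command_decorator import command


@command(
    "example_command",
    "Does something illustrative",
    {
        "target": {
            "type": "string",
            "description": "What to operate on",
            "required": True,
        }
    },
)
def example_command(target: str, agent: Agent) -> str:
    """Hypothetical command, included only to show the module layout."""
    return f"Handled {target}"
```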
