Skip to content

Commit

Permalink
Merge branch 'master' into agbenchmark-tuto
Browse files Browse the repository at this point in the history
  • Loading branch information
collijk committed Aug 1, 2023
2 parents acdf599 + ad18f77 commit 9e85b55
Show file tree
Hide file tree
Showing 43 changed files with 559 additions and 309 deletions.
8 changes: 4 additions & 4 deletions .env.template
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,13 @@ OPENAI_API_KEY=your-openai-api-key
## USER_AGENT - Define the user-agent used by the requests library to browse websites (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the Auto-GPT root directory. (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file (Default plugins_config.yaml)
## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the Auto-GPT root directory. (Default plugins_config.yaml)
# PLUGINS_CONFIG_FILE=plugins_config.yaml

## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the Auto-GPT root directory. (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml

## OPENAI_API_BASE_URL - Custom url for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true, leave blank to keep the default url
Expand Down Expand Up @@ -58,7 +58,7 @@ OPENAI_API_KEY=your-openai-api-key
## USE_AZURE - Use Azure OpenAI or not (Default: False)
# USE_AZURE=False

## AZURE_CONFIG_FILE - The path to the azure.yaml file (Default: azure.yaml)
## AZURE_CONFIG_FILE - The path to the azure.yaml file, relative to the Auto-GPT root directory. (Default: azure.yaml)
# AZURE_CONFIG_FILE=azure.yaml


Expand Down
15 changes: 8 additions & 7 deletions BULLETIN.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,23 +4,24 @@
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.

# v0.4.5 RELEASE HIGHLIGHTS! 🚀
# v0.4.6 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
This release includes under-the-hood improvements and bug fixes, such as more
accurate token counts for OpenAI functions, faster CI builds, improved plugin
handling, and refactoring of the Config class for better maintainability.
This release includes under-the-hood improvements and bug fixes, including better UTF-8
special character support, workspace write access for sandboxed Python execution,
more robust path resolution for config files and the workspace, and a full restructure
of the Agent class, the "brain" of Auto-GPT, to make it more extensible.

We have also released some documentation updates, including:

- *How to share your system logs*
Visit [docs/share-your-logs.md] to learn how to share logs with us
Visit [docs/share-your-logs.md] to learn how to share logs with us
via a log analyzer graciously contributed by https://www.e2b.dev/

- *Auto-GPT re-architecture documentation*
You can learn more about the inner-workings of the Auto-GPT re-architecture
You can learn more about the inner-workings of the Auto-GPT re-architecture
released last cycle, via these links:
* [autogpt/core/README.md]
* [autogpt/core/ARCHITECTURE_NOTES.md]

Take a look at the Release Notes on Github for the full changelog!
Take a look at the Release Notes on Github for the full changelog!
https://github.com/Significant-Gravitas/Auto-GPT/releases.
15 changes: 3 additions & 12 deletions agbenchmark/benchmarks.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import os
import sys
from pathlib import Path
from typing import Tuple
Expand Down Expand Up @@ -25,9 +26,9 @@ def bootstrap_agent(task, continuous_mode) -> Agent:
config.continuous_mode = continuous_mode
config.temperature = 0
config.plain_output = True
command_registry = get_command_registry(config)
command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
config.memory_backend = "no_memory"
config.workspace_path = Workspace.set_workspace_directory(config)
config.workspace_path = Workspace.init_workspace_directory(config)
config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
ai_config = AIConfig(
ai_name="Auto-GPT",
Expand All @@ -44,16 +45,6 @@ def bootstrap_agent(task, continuous_mode) -> Agent:
)


def get_command_registry(config: Config):
    """Assemble a CommandRegistry containing every command category that is
    not disabled in the given configuration.

    Args:
        config: Application config; its `disabled_command_categories` list
            determines which categories are excluded.

    Returns:
        A CommandRegistry populated with the enabled command modules.
    """
    registry = CommandRegistry()
    for category in COMMAND_CATEGORIES:
        # Honor user-disabled categories from the config.
        if category in config.disabled_command_categories:
            continue
        registry.import_commands(category)
    return registry


if __name__ == "__main__":
# The first argument is the script name itself, second is the task
if len(sys.argv) != 2:
Expand Down
6 changes: 3 additions & 3 deletions autogpt/agents/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -293,10 +293,10 @@ def execute_command(
# Handle non-native commands (e.g. from plugins)
for command in agent.ai_config.prompt_generator.commands:
if (
command_name == command["label"].lower()
or command_name == command["name"].lower()
command_name == command.label.lower()
or command_name == command.name.lower()
):
return command["function"](**arguments)
return command.function(**arguments)

raise RuntimeError(
f"Cannot execute '{command_name}': unknown command."
Expand Down
111 changes: 101 additions & 10 deletions autogpt/agents/base.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
from __future__ import annotations

import re
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Optional
from typing import TYPE_CHECKING, Any, Literal, Optional

if TYPE_CHECKING:
from autogpt.config import AIConfig, Config
Expand All @@ -23,6 +24,8 @@
class BaseAgent(metaclass=ABCMeta):
"""Base class for all Auto-GPT agents."""

ThoughtProcessID = Literal["one-shot"]

def __init__(
self,
ai_config: AIConfig,
Expand Down Expand Up @@ -91,6 +94,7 @@ def __init__(
def think(
self,
instruction: Optional[str] = None,
thought_process_id: ThoughtProcessID = "one-shot",
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Runs the agent for one cycle.
Expand All @@ -103,8 +107,8 @@ def think(

instruction = instruction or self.default_cycle_instruction

prompt: ChatSequence = self.construct_prompt(instruction)
prompt = self.on_before_think(prompt, instruction)
prompt: ChatSequence = self.construct_prompt(instruction, thought_process_id)
prompt = self.on_before_think(prompt, thought_process_id, instruction)
raw_response = create_chat_completion(
prompt,
self.config,
Expand All @@ -114,7 +118,7 @@ def think(
)
self.cycle_count += 1

return self.on_response(raw_response, prompt, instruction)
return self.on_response(raw_response, thought_process_id, prompt, instruction)

@abstractmethod
def execute(
Expand All @@ -137,6 +141,7 @@ def execute(

def construct_base_prompt(
self,
thought_process_id: ThoughtProcessID,
prepend_messages: list[Message] = [],
append_messages: list[Message] = [],
reserve_tokens: int = 0,
Expand Down Expand Up @@ -178,7 +183,11 @@ def construct_base_prompt(

return prompt

def construct_prompt(self, cycle_instruction: str) -> ChatSequence:
def construct_prompt(
self,
cycle_instruction: str,
thought_process_id: ThoughtProcessID,
) -> ChatSequence:
"""Constructs and returns a prompt with the following structure:
1. System prompt
2. Message history of the agent, truncated & prepended with running summary as needed
Expand All @@ -195,14 +204,86 @@ def construct_prompt(self, cycle_instruction: str) -> ChatSequence:
cycle_instruction_tlength = count_message_tokens(
cycle_instruction_msg, self.llm.name
)
prompt = self.construct_base_prompt(reserve_tokens=cycle_instruction_tlength)

append_messages: list[Message] = []

response_format_instr = self.response_format_instruction(thought_process_id)
if response_format_instr:
append_messages.append(Message("system", response_format_instr))

prompt = self.construct_base_prompt(
thought_process_id,
append_messages=append_messages,
reserve_tokens=cycle_instruction_tlength,
)

# ADD user input message ("triggering prompt")
prompt.append(cycle_instruction_msg)

return prompt

def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequence:
# This can be expanded to support multiple types of (inter)actions within an agent
def response_format_instruction(self, thought_process_id: ThoughtProcessID) -> str:
    """Build the system-message text telling the LLM how to format its reply.

    Returns an instruction string embedding a TypeScript `Response` interface.
    The `command` field is omitted from the schema when OpenAI function
    calling is enabled, since the command is then conveyed via a
    `function_call` rather than inside the JSON.

    Raises:
        NotImplementedError: if `thought_process_id` is anything other than
            "one-shot", the only thought process currently implemented.
    """
    # Only the "one-shot" thought process exists today; fail fast otherwise.
    if thought_process_id != "one-shot":
        raise NotImplementedError(f"Unknown thought process '{thought_process_id}'")

    # Schema used when the command must be embedded in the JSON response.
    RESPONSE_FORMAT_WITH_COMMAND = """```ts
interface Response {
thoughts: {
// Thoughts
text: string;
reasoning: string;
// Short markdown-style bullet list that conveys the long-term plan
plan: string;
// Constructive self-criticism
criticism: string;
// Summary of thoughts to say to the user
speak: string;
};
command: {
name: string;
args: Record<string, any>;
};
}
```"""

    # Schema used when OpenAI function calling carries the command separately.
    RESPONSE_FORMAT_WITHOUT_COMMAND = """```ts
interface Response {
thoughts: {
// Thoughts
text: string;
reasoning: string;
// Short markdown-style bullet list that conveys the long-term plan
plan: string;
// Constructive self-criticism
criticism: string;
// Summary of thoughts to say to the user
speak: string;
};
}
```"""

    # Strip per-line leading whitespace from the literal so the schema is
    # sent to the model left-aligned regardless of source indentation.
    response_format = re.sub(
        r"\n\s+",
        "\n",
        RESPONSE_FORMAT_WITHOUT_COMMAND
        if self.config.openai_functions
        else RESPONSE_FORMAT_WITH_COMMAND,
    )

    # Mention function_call only if functions are enabled AND commands exist.
    use_functions = self.config.openai_functions and self.command_registry.commands
    return (
        f"Respond strictly with JSON{', and also specify a command to use through a function_call' if use_functions else ''}. "
        "The JSON should be compatible with the TypeScript type `Response` from the following:\n"
        f"{response_format}\n"
    )

def on_before_think(
self,
prompt: ChatSequence,
thought_process_id: ThoughtProcessID,
instruction: str,
) -> ChatSequence:
"""Called after constructing the prompt but before executing it.
Calls the `on_planning` hook of any enabled and capable plugins, adding their
Expand Down Expand Up @@ -237,7 +318,11 @@ def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequenc
return prompt

def on_response(
self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
self,
llm_response: ChatModelResponse,
thought_process_id: ThoughtProcessID,
prompt: ChatSequence,
instruction: str,
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Called upon receiving a response from the chat model.
Expand All @@ -260,7 +345,9 @@ def on_response(
) # FIXME: support function calls

try:
return self.parse_and_process_response(llm_response, prompt, instruction)
return self.parse_and_process_response(
llm_response, thought_process_id, prompt, instruction
)
except SyntaxError as e:
logger.error(f"Response could not be parsed: {e}")
# TODO: tune this message
Expand All @@ -275,7 +362,11 @@ def on_response(

@abstractmethod
def parse_and_process_response(
self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
self,
llm_response: ChatModelResponse,
thought_process_id: ThoughtProcessID,
prompt: ChatSequence,
instruction: str,
) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
"""Validate, parse & process the LLM's response.
Expand Down
9 changes: 7 additions & 2 deletions autogpt/app/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,10 @@
@click.option(
"--ai-settings",
"-C",
help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
help=(
"Specifies which ai_settings.yaml file to use, relative to the Auto-GPT"
" root directory. Will also automatically skip the re-prompt."
),
)
@click.option(
"--prompt-settings",
Expand Down Expand Up @@ -129,7 +132,9 @@ def main(
browser_name=browser_name,
allow_downloads=allow_downloads,
skip_news=skip_news,
working_directory=Path(__file__).parent.parent, # TODO: make this an option
working_directory=Path(
__file__
).parent.parent.parent, # TODO: make this an option
workspace_directory=workspace_directory,
install_plugin_deps=install_plugin_deps,
ai_name=ai_name,
Expand Down

0 comments on commit 9e85b55

Please sign in to comment.