Merge branch 'master' of github.com:Significant-Gravitas/AutoGPT into feat/jupyter-notebook-integration
Mahdi Karami committed May 2, 2024
2 parents acdc09a + d57ccf7 commit 1fb99f1
Showing 119 changed files with 4,320 additions and 5,872 deletions.
1 change: 1 addition & 0 deletions .github/workflows/pr-label.yml
@@ -52,6 +52,7 @@ jobs:
l_label: 'size/l'
l_max_size: 500
xl_label: 'size/xl'
message_if_xl:

scope:
if: ${{ github.event_name == 'pull_request_target' }}
15 changes: 6 additions & 9 deletions autogpts/autogpt/.env.template
@@ -17,8 +17,8 @@ OPENAI_API_KEY=your-openai-api-key
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./data/agents/<agent_id>/workspace (Default: True)
# RESTRICT_TO_WORKSPACE=True

## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled (Default: None)
# DISABLED_COMMAND_CATEGORIES=
## DISABLED_COMMANDS - The comma separated list of commands that are disabled (Default: None)
# DISABLED_COMMANDS=

## FILE_STORAGE_BACKEND - Choose a storage backend for contents
## Options: local, gcs, s3
@@ -44,9 +44,6 @@ OPENAI_API_KEY=your-openai-api-key
## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the AutoGPT root directory. (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the AutoGPT root directory. (Default plugins_config.yaml)
# PLUGINS_CONFIG_FILE=plugins_config.yaml

## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the AutoGPT root directory. (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml

@@ -90,11 +87,11 @@ OPENAI_API_KEY=your-openai-api-key
### LLM MODELS
################################################################################

## SMART_LLM - Smart language model (Default: gpt-4-turbo-preview)
# SMART_LLM=gpt-4-turbo-preview
## SMART_LLM - Smart language model (Default: gpt-4-turbo)
# SMART_LLM=gpt-4-turbo

## FAST_LLM - Fast language model (Default: gpt-3.5-turbo-0125)
# FAST_LLM=gpt-3.5-turbo-0125
## FAST_LLM - Fast language model (Default: gpt-3.5-turbo)
# FAST_LLM=gpt-3.5-turbo

## EMBEDDING_MODEL - Model to use for creating embeddings
# EMBEDDING_MODEL=text-embedding-3-small
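The template above also replaces DISABLED_COMMAND_CATEGORIES with DISABLED_COMMANDS, which takes individual command names as a comma-separated list. A minimal sketch of how such a variable might be parsed; the helper below is illustrative only, not AutoGPT's actual config code:

```python
# Illustrative sketch: parse a comma-separated DISABLED_COMMANDS value from
# the environment, as described by the .env.template comment above.
import os


def get_disabled_commands() -> list[str]:
    """Return the disabled command names, or an empty list if unset."""
    raw = os.getenv("DISABLED_COMMANDS", "")
    return [name.strip() for name in raw.split(",") if name.strip()]
```

With a hypothetical value such as `DISABLED_COMMANDS=command_a,command_b`, this yields `["command_a", "command_b"]`.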
22 changes: 10 additions & 12 deletions autogpts/autogpt/agbenchmark_config/benchmarks.py
@@ -5,12 +5,11 @@

from autogpt.agent_manager.agent_manager import AgentManager
from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.agents.prompt_strategies.one_shot import OneShotAgentPromptStrategy
from autogpt.app.main import _configure_openai_provider, run_interaction_loop
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIProfile, ConfigBuilder
from autogpt.file_storage import FileStorageBackendName, get_storage
from autogpt.logs.config import configure_logging
from autogpt.models.command_registry import CommandRegistry

LOG_DIR = Path(__file__).parent / "logs"

@@ -21,26 +20,27 @@ def run_specific_agent(task: str, continuous_mode: bool = False) -> None:


def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
config = ConfigBuilder.build_config_from_env()
config.logging.level = logging.DEBUG
config.logging.log_dir = LOG_DIR
config.logging.plain_console_output = True
configure_logging(**config.logging.dict())
configure_logging(
level=logging.DEBUG,
log_dir=LOG_DIR,
plain_console_output=True,
)

config = ConfigBuilder.build_config_from_env()
config.continuous_mode = continuous_mode
config.continuous_limit = 20
config.noninteractive_mode = True
config.memory_backend = "no_memory"

command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)

ai_profile = AIProfile(
ai_name="AutoGPT",
ai_role="a multi-purpose AI assistant.",
ai_goals=[task],
)

agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
agent_prompt_config = OneShotAgentPromptStrategy.default_configuration.copy(
deep=True
)
agent_prompt_config.use_functions_api = config.openai_functions
agent_settings = AgentSettings(
name=Agent.default_settings.name,
@@ -52,7 +52,6 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
smart_llm=config.smart_llm,
allow_fs_access=not config.restrict_to_workspace,
use_functions_api=config.openai_functions,
plugins=config.plugins,
),
prompt_config=agent_prompt_config,
history=Agent.default_settings.history.copy(deep=True),
@@ -68,7 +67,6 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
agent = Agent(
settings=agent_settings,
llm_provider=_configure_openai_provider(config),
command_registry=command_registry,
file_storage=file_storage,
legacy_config=config,
)
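The benchmark bootstrap now configures logging up front with explicit arguments instead of mutating config.logging and re-expanding it, and the agent is built without a CommandRegistry or plugins. A condensed sketch of the new flow, assuming the import paths shown in the diff above:

```python
# Condensed sketch of the new bootstrap order (mirrors the diff above;
# not a drop-in replacement for benchmarks.py).
import logging
from pathlib import Path

from autogpt.config import ConfigBuilder
from autogpt.logs.config import configure_logging

LOG_DIR = Path(__file__).parent / "logs"

# 1. Logging is configured directly, no longer via config.logging.dict().
configure_logging(
    level=logging.DEBUG,
    log_dir=LOG_DIR,
    plain_console_output=True,
)

# 2. The app config is built afterwards and tweaked for benchmarking.
config = ConfigBuilder.build_config_from_env()
config.continuous_mode = False
config.continuous_limit = 20
config.noninteractive_mode = True
```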
19 changes: 0 additions & 19 deletions autogpts/autogpt/autogpt/agent_factory/configurators.py
@@ -1,13 +1,9 @@
from typing import Optional

from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIDirectives, AIProfile, Config
from autogpt.core.resource.model_providers import ChatModelProvider
from autogpt.file_storage.base import FileStorage
from autogpt.logs.config import configure_chat_plugins
from autogpt.models.command_registry import CommandRegistry
from autogpt.plugins import scan_plugins


def create_agent(
@@ -67,15 +63,6 @@ def _configure_agent(
" must be specified"
)

app_config.plugins = scan_plugins(app_config)
configure_chat_plugins(app_config)

# Create a CommandRegistry instance and scan default folder
command_registry = CommandRegistry.with_command_modules(
modules=COMMAND_CATEGORIES,
config=app_config,
)

agent_state = state or create_agent_state(
agent_id=agent_id,
task=task,
@@ -89,7 +76,6 @@ def _configure_agent(
return Agent(
settings=agent_state,
llm_provider=llm_provider,
command_registry=command_registry,
file_storage=file_storage,
legacy_config=app_config,
)
@@ -102,9 +88,6 @@ def create_agent_state(
directives: AIDirectives,
app_config: Config,
) -> AgentSettings:
agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
agent_prompt_config.use_functions_api = app_config.openai_functions

return AgentSettings(
agent_id=agent_id,
name=Agent.default_settings.name,
Expand All @@ -117,8 +100,6 @@ def create_agent_state(
smart_llm=app_config.smart_llm,
allow_fs_access=not app_config.restrict_to_workspace,
use_functions_api=app_config.openai_functions,
plugins=app_config.plugins,
),
prompt_config=agent_prompt_config,
history=Agent.default_settings.history.copy(deep=True),
)
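Default settings objects are still duplicated with .copy(deep=True) before being customised per agent. A standalone illustration of why the deep copy matters, using plain pydantic rather than AutoGPT's own models:

```python
# Standalone illustration (pydantic v1-style API, as used above): deep-copying
# a shared defaults object keeps per-agent mutations from leaking back into it.
from pydantic import BaseModel


class HistorySettings(BaseModel):
    messages: list[str] = []


DEFAULT_HISTORY = HistorySettings()

agent_a = DEFAULT_HISTORY.copy(deep=True)
agent_b = DEFAULT_HISTORY.copy(deep=True)

agent_a.messages.append("hello")

assert DEFAULT_HISTORY.messages == []  # shared default is untouched
assert agent_b.messages == []          # other copies are unaffected
```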
5 changes: 1 addition & 4 deletions autogpts/autogpt/autogpt/agent_factory/profile_generator.py
@@ -15,7 +15,6 @@
CompletionModelFunction,
)
from autogpt.core.utils.json_schema import JSONSchema
from autogpt.core.utils.json_utils import json_loads

logger = logging.getLogger(__name__)

@@ -203,9 +202,7 @@ def parse_response_content(
f"LLM did not call {self._create_agent_function.name} function; "
"agent profile creation failed"
)
arguments: object = json_loads(
response_content.tool_calls[0].function.arguments
)
arguments: object = response_content.tool_calls[0].function.arguments
ai_profile = AIProfile(
ai_name=arguments.get("name"),
ai_role=arguments.get("description"),
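parse_response_content no longer runs json_loads on the tool call's arguments, which implies the model provider now hands them over already parsed. A defensive sketch that accepts either form; the helper is hypothetical, not part of the codebase:

```python
# Hypothetical helper, not AutoGPT code: normalise tool-call arguments whether
# the provider returns them pre-parsed (dict) or as a raw JSON string.
import json
from typing import Any


def normalize_arguments(arguments: Any) -> dict:
    """Return tool-call arguments as a dict."""
    if isinstance(arguments, str):
        return json.loads(arguments)
    return dict(arguments)
```

With that in place, `normalize_arguments(args).get("name")` works against either provider behaviour.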
6 changes: 3 additions & 3 deletions autogpts/autogpt/autogpt/agent_manager/agent_manager.py
@@ -20,9 +20,9 @@ def generate_id(agent_name: str) -> str:
def list_agents(self) -> list[str]:
"""Return all agent directories within storage."""
agent_dirs: list[str] = []
for dir in self.file_manager.list_folders():
if self.file_manager.exists(dir / "state.json"):
agent_dirs.append(dir.name)
for file_path in self.file_manager.list_files():
if len(file_path.parts) == 2 and file_path.name == "state.json":
agent_dirs.append(file_path.parent.name)
return agent_dirs

def get_agent_dir(self, agent_id: str) -> Path:
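list_agents now works from a flat file listing: an agent is any directory sitting directly under the storage root that contains a state.json. A self-contained sketch of the same check using pathlib, outside the FileStorage abstraction:

```python
# Illustrative equivalent using pathlib instead of the FileStorage API:
# an agent directory is any immediate subdirectory containing state.json.
from pathlib import Path


def list_agents(agents_dir: Path) -> list[str]:
    """Return the names of all agent directories under agents_dir."""
    return [
        state_file.parent.name
        for state_file in agents_dir.glob("*/state.json")
        if state_file.is_file()
    ]
```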
1 change: 1 addition & 0 deletions autogpts/autogpt/autogpt/agents/README.md
11 changes: 8 additions & 3 deletions autogpts/autogpt/autogpt/agents/__init__.py
@@ -1,4 +1,9 @@
from .agent import Agent
from .base import AgentThoughts, BaseAgent, CommandArgs, CommandName
from .agent import Agent, OneShotAgentActionProposal
from .base import BaseAgent, BaseAgentActionProposal

__all__ = ["BaseAgent", "Agent", "CommandName", "CommandArgs", "AgentThoughts"]
__all__ = [
"BaseAgent",
"Agent",
"BaseAgentActionProposal",
"OneShotAgentActionProposal",
]
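The package now exports action-proposal types in place of the old CommandName, CommandArgs, and AgentThoughts aliases. A sketch, based only on the updated __all__ above, of how downstream code would import the renamed names:

```python
# Sketch based on the updated __all__ above: the proposal types replace the
# old CommandName / CommandArgs / AgentThoughts aliases in downstream imports.
from autogpt.agents import (
    Agent,
    BaseAgent,
    BaseAgentActionProposal,
    OneShotAgentActionProposal,
)
```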
