Skip to content

Commit

Permalink
clean(autogpt): Remove old plugin system (#7097)
Browse files Browse the repository at this point in the history
### Background

Follow up after merging #7054, old plugins will no longer be used.

### Changes 🏗️

- Removed all dead code that was used to load and run plugins.
- Removed `auto-gpt-plugin-template` dependency
- Removed `rev=` from `autogpt-forge` dependency (the set `rev` had incompatible `duckduckgo-search` versions)
- Kept `--install-plugin-deps` CLI option and its associated dead code (may be needed for new plugins)
  • Loading branch information
kcze committed Apr 28, 2024
1 parent 0014e2a commit d38e8b8
Show file tree
Hide file tree
Showing 26 changed files with 11 additions and 1,490 deletions.
3 changes: 0 additions & 3 deletions autogpts/autogpt/.env.template
Expand Up @@ -44,9 +44,6 @@ OPENAI_API_KEY=your-openai-api-key
## AI_SETTINGS_FILE - Specifies which AI Settings file to use, relative to the AutoGPT root directory. (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml

## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file, relative to the AutoGPT root directory. (Default plugins_config.yaml)
# PLUGINS_CONFIG_FILE=plugins_config.yaml

## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use, relative to the AutoGPT root directory. (defaults to prompt_settings.yaml)
# PROMPT_SETTINGS_FILE=prompt_settings.yaml

Expand Down
1 change: 0 additions & 1 deletion autogpts/autogpt/agbenchmark_config/benchmarks.py
Expand Up @@ -52,7 +52,6 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
smart_llm=config.smart_llm,
allow_fs_access=not config.restrict_to_workspace,
use_functions_api=config.openai_functions,
plugins=config.plugins,
),
prompt_config=agent_prompt_config,
history=Agent.default_settings.history.copy(deep=True),
Expand Down
6 changes: 0 additions & 6 deletions autogpts/autogpt/autogpt/agent_factory/configurators.py
Expand Up @@ -4,8 +4,6 @@
from autogpt.config import AIDirectives, AIProfile, Config
from autogpt.core.resource.model_providers import ChatModelProvider
from autogpt.file_storage.base import FileStorage
from autogpt.logs.config import configure_chat_plugins
from autogpt.plugins import scan_plugins


def create_agent(
Expand Down Expand Up @@ -65,9 +63,6 @@ def _configure_agent(
" must be specified"
)

app_config.plugins = scan_plugins(app_config)
configure_chat_plugins(app_config)

agent_state = state or create_agent_state(
agent_id=agent_id,
task=task,
Expand Down Expand Up @@ -105,7 +100,6 @@ def create_agent_state(
smart_llm=app_config.smart_llm,
allow_fs_access=not app_config.restrict_to_workspace,
use_functions_api=app_config.openai_functions,
plugins=app_config.plugins,
),
history=Agent.default_settings.history.copy(deep=True),
)
16 changes: 0 additions & 16 deletions autogpts/autogpt/autogpt/agents/base.py
Expand Up @@ -15,7 +15,6 @@
overload,
)

from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore
from pydantic import BaseModel, Field, validator

Expand Down Expand Up @@ -96,21 +95,6 @@ class BaseAgentConfiguration(SystemConfiguration):
summary_max_tlength: Optional[int] = None
# TODO: move to ActionHistoryConfiguration

plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)

class Config:
arbitrary_types_allowed = True # Necessary for plugins

@validator("plugins", each_item=True)
def validate_plugins(cls, p: AutoGPTPluginTemplate | Any):
assert issubclass(
p.__class__, AutoGPTPluginTemplate
), f"{p} does not subclass AutoGPTPluginTemplate"
assert (
p.__class__.__name__ != "AutoGPTPluginTemplate"
), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
return p

@validator("use_functions_api")
def validate_openai_functions(cls, v: bool, values: dict[str, Any]):
if v:
Expand Down
Expand Up @@ -135,7 +135,6 @@ async def write_to_file(self, filename: str | Path, contents: str) -> str:
Returns:
str: A message indicating success or failure
"""
logger.info(f"self: {self}")
if directory := os.path.dirname(filename):
self.workspace.make_dir(directory)
await self.workspace.write_file(filename, contents)
Expand Down
22 changes: 2 additions & 20 deletions autogpts/autogpt/autogpt/app/main.py
Expand Up @@ -37,13 +37,11 @@
from autogpt.core.resource.model_providers.openai import OpenAIProvider
from autogpt.core.runner.client_lib.utils import coroutine
from autogpt.file_storage import FileStorageBackendName, get_storage
from autogpt.logs.config import LoggingConfig, configure_chat_plugins, configure_logging
from autogpt.logs.config import configure_logging
from autogpt.logs.helpers import print_attribute, speak
from autogpt.models.action_history import ActionInterruptedByHuman
from autogpt.plugins import scan_plugins
from autogpt.utils.exceptions import AgentTerminated, InvalidAgentResponseError
from autogpt.utils.utils import DEFAULT_FINISH_COMMAND
from scripts.install_plugin_deps import install_plugin_dependencies

from .configurator import apply_overrides_to_config
from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings
Expand Down Expand Up @@ -166,12 +164,6 @@ async def run_auto_gpt(
title_color=Fore.YELLOW,
)

if install_plugin_deps:
install_plugin_dependencies()

config.plugins = scan_plugins(config)
configure_chat_plugins(config)

# Let user choose an existing agent to run
agent_manager = AgentManager(file_storage)
existing_agents = agent_manager.list_agents()
Expand Down Expand Up @@ -408,11 +400,6 @@ async def run_auto_gpt_server(

llm_provider = _configure_openai_provider(config)

if install_plugin_deps:
install_plugin_dependencies()

config.plugins = scan_plugins(config)

# Set up & start server
database = AgentDB(
database_string=os.getenv("AP_SERVER_DB_URL", "sqlite:///data/ap_server.db"),
Expand Down Expand Up @@ -726,12 +713,7 @@ async def get_user_feedback(

while user_feedback is None:
# Get input from user
if config.chat_messages_enabled:
console_input = clean_input(config, "Waiting for your response...")
else:
console_input = clean_input(
config, Fore.MAGENTA + "Input:" + Style.RESET_ALL
)
console_input = clean_input(config, Fore.MAGENTA + "Input:" + Style.RESET_ALL)

# Parse user input
if console_input.lower().strip() == config.authorise_key:
Expand Down
30 changes: 1 addition & 29 deletions autogpts/autogpt/autogpt/app/utils.py
Expand Up @@ -20,34 +20,6 @@

def clean_input(config: "Config", prompt: str = ""):
try:
if config.chat_messages_enabled:
for plugin in config.plugins:
if not hasattr(plugin, "can_handle_user_input"):
continue
if not plugin.can_handle_user_input(user_input=prompt):
continue
plugin_response = plugin.user_input(user_input=prompt)
if not plugin_response:
continue
if plugin_response.lower() in [
"yes",
"yeah",
"y",
"ok",
"okay",
"sure",
"alright",
]:
return config.authorise_key
elif plugin_response.lower() in [
"no",
"nope",
"n",
"negative",
]:
return config.exit_key
return plugin_response

# ask for input, default when just pressing Enter is y
logger.debug("Asking user via keyboard...")

Expand Down Expand Up @@ -215,7 +187,7 @@ def print_motd(config: "Config", logger: logging.Logger):
},
msg=motd_line,
)
if is_new_motd and not config.chat_messages_enabled:
if is_new_motd:
input(
Fore.MAGENTA
+ Style.BRIGHT
Expand Down
48 changes: 1 addition & 47 deletions autogpts/autogpt/autogpt/config/config.py
Expand Up @@ -7,9 +7,8 @@
from pathlib import Path
from typing import Any, Optional, Union

from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore
from pydantic import Field, SecretStr, validator
from pydantic import SecretStr, validator

import autogpt
from autogpt.app.utils import clean_input
Expand All @@ -25,15 +24,13 @@
)
from autogpt.file_storage import FileStorageBackendName
from autogpt.logs.config import LoggingConfig
from autogpt.plugins.plugins_config import PluginsConfig
from autogpt.speech import TTSConfig

logger = logging.getLogger(__name__)

PROJECT_ROOT = Path(autogpt.__file__).parent.parent
AI_SETTINGS_FILE = Path("ai_settings.yaml")
AZURE_CONFIG_FILE = Path("azure.yaml")
PLUGINS_CONFIG_FILE = Path("plugins_config.yaml")
PROMPT_SETTINGS_FILE = Path("prompt_settings.yaml")

GPT_4_MODEL = OpenAIModelName.GPT4
Expand All @@ -54,9 +51,6 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
authorise_key: str = UserConfigurable(default="y", from_env="AUTHORISE_COMMAND_KEY")
exit_key: str = UserConfigurable(default="n", from_env="EXIT_KEY")
noninteractive_mode: bool = False
chat_messages_enabled: bool = UserConfigurable(
default=True, from_env=lambda: os.getenv("CHAT_MESSAGES_ENABLED") == "True"
)

# TTS configuration
logging: LoggingConfig = LoggingConfig()
Expand Down Expand Up @@ -181,29 +175,6 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
from_env="USER_AGENT",
)

###################
# Plugin Settings #
###################
plugins_dir: str = UserConfigurable("plugins", from_env="PLUGINS_DIR")
plugins_config_file: Path = UserConfigurable(
default=PLUGINS_CONFIG_FILE, from_env="PLUGINS_CONFIG_FILE"
)
plugins_config: PluginsConfig = Field(
default_factory=lambda: PluginsConfig(plugins={})
)
plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)
plugins_allowlist: list[str] = UserConfigurable(
default_factory=list,
from_env=lambda: _safe_split(os.getenv("ALLOWLISTED_PLUGINS")),
)
plugins_denylist: list[str] = UserConfigurable(
default_factory=list,
from_env=lambda: _safe_split(os.getenv("DENYLISTED_PLUGINS")),
)
plugins_openai: list[str] = UserConfigurable(
default_factory=list, from_env=lambda: _safe_split(os.getenv("OPENAI_PLUGINS"))
)

###############
# Credentials #
###############
Expand Down Expand Up @@ -231,16 +202,6 @@ class Config(SystemSettings, arbitrary_types_allowed=True):
# Stable Diffusion
sd_webui_auth: Optional[str] = UserConfigurable(from_env="SD_WEBUI_AUTH")

@validator("plugins", each_item=True)
def validate_plugins(cls, p: AutoGPTPluginTemplate | Any):
assert issubclass(
p.__class__, AutoGPTPluginTemplate
), f"{p} does not subclass AutoGPTPluginTemplate"
assert (
p.__class__.__name__ != "AutoGPTPluginTemplate"
), f"Plugins must subclass AutoGPTPluginTemplate; {p} is a template instance"
return p

@validator("openai_functions")
def validate_openai_functions(cls, v: bool, values: dict[str, Any]):
if v:
Expand All @@ -266,7 +227,6 @@ def build_config_from_env(cls, project_root: Path = PROJECT_ROOT) -> Config:
for k in {
"ai_settings_file", # TODO: deprecate or repurpose
"prompt_settings_file", # TODO: deprecate or repurpose
"plugins_config_file", # TODO: move from project root
"azure_config_file", # TODO: move from project root
}:
setattr(config, k, project_root / getattr(config, k))
Expand All @@ -278,12 +238,6 @@ def build_config_from_env(cls, project_root: Path = PROJECT_ROOT) -> Config:
):
config.openai_credentials.load_azure_config(config_file)

config.plugins_config = PluginsConfig.load_config(
config.plugins_config_file,
config.plugins_denylist,
config.plugins_allowlist,
)

return config


Expand Down
3 changes: 1 addition & 2 deletions autogpts/autogpt/autogpt/logs/__init__.py
@@ -1,4 +1,4 @@
from .config import configure_chat_plugins, configure_logging
from .config import configure_logging
from .helpers import user_friendly_output
from .log_cycle import (
CURRENT_CONTEXT_FILE_NAME,
Expand All @@ -13,7 +13,6 @@

__all__ = [
"configure_logging",
"configure_chat_plugins",
"user_friendly_output",
"CURRENT_CONTEXT_FILE_NAME",
"NEXT_ACTION_FILE_NAME",
Expand Down
20 changes: 0 additions & 20 deletions autogpts/autogpt/autogpt/logs/config.py
Expand Up @@ -8,11 +8,9 @@
from pathlib import Path
from typing import TYPE_CHECKING, Optional

from auto_gpt_plugin_template import AutoGPTPluginTemplate
from openai._base_client import log as openai_logger

if TYPE_CHECKING:
from autogpt.config import Config
from autogpt.speech import TTSConfig

from autogpt.core.configuration import SystemConfiguration, UserConfigurable
Expand All @@ -34,8 +32,6 @@
SPEECH_OUTPUT_LOGGER = "VOICE"
USER_FRIENDLY_OUTPUT_LOGGER = "USER_FRIENDLY_OUTPUT"

_chat_plugins: list[AutoGPTPluginTemplate] = []


class LogFormatName(str, enum.Enum):
SIMPLE = "simple"
Expand Down Expand Up @@ -222,19 +218,3 @@ def configure_logging(

# Disable debug logging from OpenAI library
openai_logger.setLevel(logging.WARNING)


def configure_chat_plugins(config: Config) -> None:
"""Configure chat plugins for use by the logging module"""

logger = logging.getLogger(__name__)

# Add chat plugins capable of report to logger
if config.chat_messages_enabled:
if _chat_plugins:
_chat_plugins.clear()

for plugin in config.plugins:
if hasattr(plugin, "can_handle_report") and plugin.can_handle_report():
logger.debug(f"Loaded plugin into logger: {plugin.__class__.__name__}")
_chat_plugins.append(plugin)
6 changes: 1 addition & 5 deletions autogpts/autogpt/autogpt/logs/helpers.py
Expand Up @@ -3,7 +3,7 @@

from colorama import Fore

from .config import SPEECH_OUTPUT_LOGGER, USER_FRIENDLY_OUTPUT_LOGGER, _chat_plugins
from .config import SPEECH_OUTPUT_LOGGER, USER_FRIENDLY_OUTPUT_LOGGER


def user_friendly_output(
Expand All @@ -21,10 +21,6 @@ def user_friendly_output(
"""
logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER)

if _chat_plugins:
for plugin in _chat_plugins:
plugin.report(f"{title}: {message}")

logger.log(
level,
message,
Expand Down
18 changes: 0 additions & 18 deletions autogpts/autogpt/autogpt/memory/vector/utils.py
@@ -1,5 +1,4 @@
import logging
from contextlib import suppress
from typing import Any, Sequence, overload

import numpy as np
Expand Down Expand Up @@ -51,16 +50,9 @@ async def get_embedding(

if isinstance(input, str):
input = input.replace("\n", " ")

with suppress(NotImplementedError):
return _get_embedding_with_plugin(input, config)

elif multiple and isinstance(input[0], str):
input = [text.replace("\n", " ") for text in input]

with suppress(NotImplementedError):
return [_get_embedding_with_plugin(i, config) for i in input]

model = config.embedding_model

logger.debug(
Expand All @@ -86,13 +78,3 @@ async def get_embedding(
)
embeddings.append(result.embedding)
return embeddings


def _get_embedding_with_plugin(text: str, config: Config) -> Embedding:
for plugin in config.plugins:
if plugin.can_handle_text_embedding(text):
embedding = plugin.handle_text_embedding(text)
if embedding is not None:
return embedding

raise NotImplementedError

0 comments on commit d38e8b8

Please sign in to comment.