Quality update #1694

Merged
merged 1 commit on Apr 15, 2023
4 changes: 3 additions & 1 deletion autogpt/__main__.py
@@ -34,7 +34,9 @@ def main() -> None:
# Initialize memory and make sure it is empty.
# this is particularly important for indexing and referencing pinecone memory
memory = get_memory(cfg, init=True)
logger.typewriter_log(f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}")
logger.typewriter_log(
f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
agent = Agent(
ai_name=ai_name,
20 changes: 12 additions & 8 deletions autogpt/app.py
@@ -89,13 +89,13 @@ def get_command(response: str):


def map_command_synonyms(command_name: str):
""" Takes the original command name given by the AI, and checks if the
string matches a list of common/known hallucinations
"""Takes the original command name given by the AI, and checks if the
string matches a list of common/known hallucinations
"""
synonyms = [
('write_file', 'write_to_file'),
('create_file', 'write_to_file'),
('search', 'google')
("write_file", "write_to_file"),
("create_file", "write_to_file"),
("search", "google"),
]
for seen_command, actual_command_name in synonyms:
if command_name == seen_command:
@@ -125,7 +125,7 @@ def execute_command(command_name: str, arguments):
google_result = google_official_search(arguments["input"])
else:
google_result = google_search(arguments["input"])
safe_message = google_result.encode('utf-8', 'ignore')
safe_message = google_result.encode("utf-8", "ignore")
return str(safe_message)
elif command_name == "memory_add":
return memory.add(arguments["string"])
@@ -144,7 +144,9 @@ def execute_command(command_name: str, arguments):
elif command_name == "get_hyperlinks":
return get_hyperlinks(arguments["url"])
elif command_name == "clone_repository":
return clone_repository(arguments["repository_url"], arguments["clone_path"])
return clone_repository(
arguments["repository_url"], arguments["clone_path"]
)
elif command_name == "read_file":
return read_file(arguments["file"])
elif command_name == "write_to_file":
@@ -278,7 +280,9 @@ def list_agents():
Returns:
str: A list of all agents
"""
return "List of agents:\n" + "\n".join([str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()])
return "List of agents:\n" + "\n".join(
[str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]
)


def delete_agent(key: str) -> str:
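
For context on the synonym table above, here is a minimal standalone sketch of how a hallucinated command name is normalized before dispatch. The diff truncates the function, so the final pass-through return is an assumption:

def map_command_synonyms(command_name: str) -> str:
    # Known hallucinated names mapped onto the real registered commands.
    synonyms = [
        ("write_file", "write_to_file"),
        ("create_file", "write_to_file"),
        ("search", "google"),
    ]
    for seen_command, actual_command_name in synonyms:
        if command_name == seen_command:
            return actual_command_name
    return command_name  # assumption: unknown names pass through unchanged

assert map_command_synonyms("create_file") == "write_to_file"
assert map_command_synonyms("google") == "google"
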
2 changes: 1 addition & 1 deletion autogpt/args.py
@@ -54,7 +54,7 @@ def parse_arguments() -> None:
"--use-browser",
"-b",
dest="browser_name",
help="Specifies which web-browser to use when using selenium to scrape the web."
help="Specifies which web-browser to use when using selenium to scrape the web.",
)
parser.add_argument(
"--ai-settings",
4 changes: 2 additions & 2 deletions autogpt/commands/execute_code.py
@@ -99,8 +99,8 @@ def execute_shell(command_line: str) -> str:
str: The output of the command
"""
current_dir = os.getcwd()

if str(WORKING_DIRECTORY) not in current_dir: # Change dir into workspace if necessary
# Change dir into workspace if necessary
if str(WORKING_DIRECTORY) not in current_dir:
work_dir = os.path.join(os.getcwd(), WORKING_DIRECTORY)
os.chdir(work_dir)

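
The guard above keeps shell commands inside the agent workspace by checking whether the workspace path appears in the current directory string. A standalone sketch of that check; WORKING_DIRECTORY here is a placeholder value, and the makedirs call is an assumption (the real module sets up the workspace elsewhere):

import os

WORKING_DIRECTORY = "auto_gpt_workspace"  # placeholder, not the real constant

current_dir = os.getcwd()
# Change dir into workspace if necessary
if WORKING_DIRECTORY not in current_dir:
    work_dir = os.path.join(os.getcwd(), WORKING_DIRECTORY)
    os.makedirs(work_dir, exist_ok=True)  # assumption, for a runnable sketch
    os.chdir(work_dir)
print("shell commands run from:", os.getcwd())
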
20 changes: 13 additions & 7 deletions autogpt/commands/git_operations.py
@@ -1,14 +1,20 @@
"""Git operations for autogpt"""
import git
from autogpt.config import Config

cfg = Config()
CFG = Config()


def clone_repository(repo_url, clone_path):
"""Clone a github repository locally"""
def clone_repository(repo_url: str, clone_path: str) -> str:
"""Clone a github repository locally

Args:
repo_url (str): The URL of the repository to clone
clone_path (str): The path to clone the repository to

Returns:
str: The result of the clone operation"""
split_url = repo_url.split("//")
auth_repo_url = f"//{cfg.github_username}:{cfg.github_api_key}@".join(split_url)
auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
git.Repo.clone_from(auth_repo_url, clone_path)
result = f"""Cloned {repo_url} to {clone_path}"""

return result
return f"""Cloned {repo_url} to {clone_path}"""
6 changes: 5 additions & 1 deletion autogpt/commands/web_selenium.py
@@ -53,7 +53,11 @@ def scrape_text_with_selenium(url: str) -> Tuple[WebDriver, str]:
"""
logging.getLogger("selenium").setLevel(logging.CRITICAL)

options_available = {'chrome': ChromeOptions, 'safari': SafariOptions, 'firefox': FirefoxOptions}
options_available = {
"chrome": ChromeOptions,
"safari": SafariOptions,
"firefox": FirefoxOptions,
}

options = options_available[CFG.selenium_web_browser]()
options.add_argument(
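
The dictionary above dispatches on the configured browser name to the matching selenium Options class; an unsupported name raises KeyError. A hedged sketch assuming selenium 4 is installed and using the usual import aliases (the imports are outside this hunk):

from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.safari.options import Options as SafariOptions

options_available = {
    "chrome": ChromeOptions,
    "safari": SafariOptions,
    "firefox": FirefoxOptions,
}

# CFG.selenium_web_browser is a plain string such as "firefox".
options = options_available["firefox"]()
options.add_argument("--headless")  # illustrative flag only
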
4 changes: 3 additions & 1 deletion autogpt/config/config.py
@@ -137,7 +137,9 @@ def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
config_params = {}
self.openai_api_type = config_params.get("azure_api_type") or "azure"
self.openai_api_base = config_params.get("azure_api_base") or ""
self.openai_api_version = config_params.get("azure_api_version") or "2023-03-15-preview"
self.openai_api_version = (
config_params.get("azure_api_version") or "2023-03-15-preview"
)
self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", [])

def set_continuous_mode(self, value: bool) -> None:
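
A note on the config_params.get(...) or default idiom above: unlike get(key, default), the or form also replaces None and empty-string values, which is what you want when azure.yaml exists but has blank fields. A standalone illustration (the base URL is a placeholder):

config_params = {"azure_api_base": "", "azure_api_version": None}

openai_api_type = config_params.get("azure_api_type") or "azure"  # key absent
openai_api_base = (
    config_params.get("azure_api_base") or "https://example.openai.azure.com"  # empty string
)
openai_api_version = (
    config_params.get("azure_api_version") or "2023-03-15-preview"  # explicit None
)
print(openai_api_type, openai_api_base, openai_api_version)
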
15 changes: 12 additions & 3 deletions autogpt/json_fixes/auto_fix.py
@@ -4,11 +4,20 @@
from autogpt.llm_utils import call_ai_function
from autogpt.logs import logger
from autogpt.config import Config
cfg = Config()

CFG = Config()


def fix_json(json_string: str, schema: str) -> str:
"""Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
"""Fix the given JSON string to make it parseable and fully compliant with
the provided schema.

Args:
json_string (str): The JSON string to fix.
schema (str): The schema to use to fix the JSON.
Returns:
str: The fixed JSON string.
"""
# Try to fix the JSON using GPT:
function_string = "def fix_json(json_string: str, schema:str=None) -> str:"
args = [f"'''{json_string}'''", f"'''{schema}'''"]
@@ -24,7 +33,7 @@ def fix_json(json_string: str, schema: str) -> str:
if not json_string.startswith("`"):
json_string = "```json\n" + json_string + "\n```"
result_string = call_ai_function(
function_string, args, description_string, model=cfg.fast_llm_model
function_string, args, description_string, model=CFG.fast_llm_model
)
logger.debug("------------ JSON FIX ATTEMPT ---------------")
logger.debug(f"Original JSON: {json_string}")
15 changes: 12 additions & 3 deletions autogpt/memory/__init__.py
@@ -50,8 +50,10 @@ def get_memory(cfg, init=False):
memory = RedisMemory(cfg)
elif cfg.memory_backend == "milvus":
if not MilvusMemory:
print("Error: Milvus sdk is not installed."
"Please install pymilvus to use Milvus as memory backend.")
print(
"Error: Milvus sdk is not installed."
"Please install pymilvus to use Milvus as memory backend."
)
else:
memory = MilvusMemory(cfg)
elif cfg.memory_backend == "no_memory":
@@ -68,4 +70,11 @@ def get_supported_memory_backends():
return supported_memory


__all__ = ["get_memory", "LocalCache", "RedisMemory", "PineconeMemory", "NoMemory", "MilvusMemory"]
__all__ = [
"get_memory",
"LocalCache",
"RedisMemory",
"PineconeMemory",
"NoMemory",
"MilvusMemory",
]
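
The milvus branch above relies on an optional-import guard that is outside this hunk: the backend class stays None when its SDK is missing, so get_memory can warn instead of crashing. A standalone sketch assuming the usual try/except ImportError form (an assumption, since the guard is not shown here):

try:
    from autogpt.memory.milvus import MilvusMemory  # fails if pymilvus is absent
except ImportError:
    MilvusMemory = None


def pick_backend(backend_name: str, cfg):
    if backend_name == "milvus":
        if not MilvusMemory:
            print(
                "Error: Milvus sdk is not installed. "
                "Please install pymilvus to use Milvus as memory backend."
            )
        else:
            return MilvusMemory(cfg)
    return None  # caller falls back to the default backend
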
81 changes: 52 additions & 29 deletions autogpt/memory/milvus.py
@@ -1,3 +1,4 @@
""" Milvus memory storage provider."""
from pymilvus import (
connections,
FieldSchema,
@@ -10,21 +11,20 @@


class MilvusMemory(MemoryProviderSingleton):
def __init__(self, cfg):
""" Construct a milvus memory storage connection.
"""Milvus memory storage provider."""

def __init__(self, cfg) -> None:
"""Construct a milvus memory storage connection.

Args:
cfg (Config): Auto-GPT global config.
"""
# connect to milvus server.
connections.connect(address=cfg.milvus_addr)
fields = [
FieldSchema(name="pk", dtype=DataType.INT64,
is_primary=True, auto_id=True),
FieldSchema(name="embeddings",
dtype=DataType.FLOAT_VECTOR, dim=1536),
FieldSchema(name="raw_text", dtype=DataType.VARCHAR,
max_length=65535)
FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536),
FieldSchema(name="raw_text", dtype=DataType.VARCHAR, max_length=65535),
]

# create collection if not exist and load it.
@@ -34,15 +34,19 @@ def __init__(self, cfg):
# create index if not exist.
if not self.collection.has_index():
self.collection.release()
self.collection.create_index("embeddings", {
"metric_type": "IP",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}, index_name="embeddings")
self.collection.create_index(
"embeddings",
{
"metric_type": "IP",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
},
index_name="embeddings",
)
self.collection.load()

def add(self, data):
""" Add a embedding of data into memory.
def add(self, data) -> str:
"""Add a embedding of data into memory.

Args:
data (str): The raw text to construct embedding index.
@@ -52,34 +56,48 @@ def add(self, data):
"""
embedding = get_ada_embedding(data)
result = self.collection.insert([[embedding], [data]])
_text = f"Inserting data into memory at primary key: {result.primary_keys[0]}:\n data: {data}"
_text = (
"Inserting data into memory at primary key: "
f"{result.primary_keys[0]}:\n data: {data}"
)
return _text

def get(self, data):
""" Return the most relevant data in memory.
"""Return the most relevant data in memory.
Args:
data: The data to compare to.
"""
return self.get_relevant(data, 1)

def clear(self):
""" Drop the index in memory.
def clear(self) -> str:
"""Drop the index in memory.

Returns:
str: log.
"""
self.collection.drop()
self.collection = Collection(self.milvus_collection, self.schema)
self.collection.create_index("embeddings", {
"metric_type": "IP",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}, index_name="embeddings")
self.collection.create_index(
"embeddings",
{
"metric_type": "IP",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
},
index_name="embeddings",
)
self.collection.load()
return "Obliviated"

def get_relevant(self, data, num_relevant=5):
""" Return the top-k relevant data in memory.
def get_relevant(self, data: str, num_relevant: int = 5):
"""Return the top-k relevant data in memory.
Args:
data: The data to compare to.
num_relevant (int, optional): The max number of relevant data. Defaults to 5.
num_relevant (int, optional): The max number of relevant data.
Defaults to 5.

Returns:
list: The top-k relevant data.
"""
# search the embedding and return the most relevant text.
embedding = get_ada_embedding(data)
@@ -88,10 +106,15 @@ def get_relevant(self, data, num_relevant=5):
"params": {"nprobe": 8},
}
result = self.collection.search(
[embedding], "embeddings", search_params, num_relevant, output_fields=["raw_text"])
[embedding],
"embeddings",
search_params,
num_relevant,
output_fields=["raw_text"],
)
return [item.entity.value_of_field("raw_text") for item in result[0]]

def get_stats(self):
def get_stats(self) -> str:
"""
Returns: The stats of the milvus cache.
"""
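
A hedged usage sketch of the provider above, assuming pymilvus is installed, a Milvus server is reachable at the address below, and an OpenAI key is configured for get_ada_embedding (none of which this diff sets up; the config attribute values are placeholders):

from autogpt.config import Config
from autogpt.memory.milvus import MilvusMemory

cfg = Config()
cfg.milvus_addr = "localhost:19530"  # placeholder: default Milvus address
cfg.milvus_collection = "autogpt"    # placeholder collection name

memory = MilvusMemory(cfg)
print(memory.add("Auto-GPT keeps long-term context in Milvus."))
print(memory.get_relevant("where is long-term context kept?", num_relevant=1))
print(memory.get_stats())
memory.clear()  # drops and recreates the collection
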
6 changes: 5 additions & 1 deletion autogpt/prompt.py
@@ -59,7 +59,11 @@ def get_prompt() -> str:
),
("List GPT Agents", "list_agents", {}),
("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
("Clone Repository", "clone_repository", {"repository_url": "<url>", "clone_path": "<directory>"}),
(
"Clone Repository",
"clone_repository",
{"repository_url": "<url>", "clone_path": "<directory>"},
),
("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
("Read file", "read_file", {"file": "<file>"}),
("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"}),
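
Each entry above is a (label, command_name, arguments) triple; the arguments dict carries the placeholder names the model is expected to fill in. A toy renderer, purely illustrative and not Auto-GPT's actual PromptGenerator:

commands = [
    (
        "Clone Repository",
        "clone_repository",
        {"repository_url": "<url>", "clone_path": "<directory>"},
    ),
    ("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
]

for i, (label, name, args) in enumerate(commands, start=1):
    arg_str = ", ".join(f'"{key}": "{value}"' for key, value in args.items())
    print(f'{i}. {label}: "{name}", args: {arg_str}')
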
18 changes: 11 additions & 7 deletions scripts/check_requirements.py
@@ -1,27 +1,31 @@
import pkg_resources
import sys


def main():
requirements_file = sys.argv[1]
with open(requirements_file, 'r') as f:
required_packages = [line.strip().split('#')[0].strip() for line in f.readlines()]
with open(requirements_file, "r") as f:
required_packages = [
line.strip().split("#")[0].strip() for line in f.readlines()
]

installed_packages = [package.key for package in pkg_resources.working_set]

missing_packages = []
for package in required_packages:
if not package: # Skip empty lines
continue
package_name = package.strip().split('==')[0]
package_name = package.strip().split("==")[0]
if package_name.lower() not in installed_packages:
missing_packages.append(package_name)

if missing_packages:
print('Missing packages:')
print(', '.join(missing_packages))
print("Missing packages:")
print(", ".join(missing_packages))
sys.exit(1)
else:
print('All packages are installed.')
print("All packages are installed.")


if __name__ == '__main__':
if __name__ == "__main__":
main()
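
The script above takes the requirements file as its first argument and exits non-zero when packages are missing, so a caller can gate startup on it. A hedged invocation sketch (paths assume it is run from the repository root):

import subprocess
import sys

result = subprocess.run(
    [sys.executable, "scripts/check_requirements.py", "requirements.txt"]
)
if result.returncode != 0:
    # The script already printed which packages are missing.
    print("Install the missing packages before starting Auto-GPT.")
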
1 change: 0 additions & 1 deletion tests/milvus_memory_test.py
@@ -15,7 +15,6 @@ def MockConfig():
"speak_mode": False,
"milvus_collection": "autogpt",
"milvus_addr": "localhost:19530",

},
)
