yapf code quality

kyegomez committed Nov 8, 2023
1 parent c279784 commit 2e7905d
Showing 104 changed files with 2,256 additions and 2,465 deletions.
5 changes: 3 additions & 2 deletions quality.sh
@@ -5,7 +5,7 @@
 
 # Run autopep8 with max aggressiveness (-aaa) and in-place modification (-i)
 # on all Python files (*.py) under the 'swarms' directory.
-autopep8 --in-place --aggressive --aggressive --recursive --experimental swarms/
+autopep8 --in-place --aggressive --aggressive --recursive --experimental --list-fixes swarms/
 
 # Run black with default settings, since black does not have an aggressiveness level.
 # Black will format all Python files it finds in the 'swarms' directory.
@@ -15,4 +15,5 @@ black --experimental-string-processing swarms/
 # Add any additional flags if needed according to your version of ruff.
 ruff swarms/
 
-# If you want to ensure the script stops if any command fails, add 'set -e' at the top.
+# YAPF
+yapf --recursive --in-place --verbose --style=google --parallel swarms
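Aside: the removed comment's advice is easy to apply. A minimal sketch of quality.sh with fail-fast behavior added, assuming the same four commands shown in the diff above, with 'set -e' so the script aborts as soon as any formatter or linter fails:

#!/usr/bin/env bash
# Sketch only: the commands from the diff above, preceded by 'set -e'
# as the removed comment recommended.
set -e

autopep8 --in-place --aggressive --aggressive --recursive --experimental --list-fixes swarms/
black --experimental-string-processing swarms/
ruff swarms/
yapf --recursive --in-place --verbose --style=google --parallel swarms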
2 changes: 0 additions & 2 deletions swarms/agents/__init__.py
@@ -8,8 +8,6 @@
 
 # from swarms.agents.idea_to_image_agent import Idea2Image
 from swarms.agents.simple_agent import SimpleAgent
-
-
 """Agent Infrastructure, models, memory, utils, tools"""
 
 __all__ = [
121 changes: 56 additions & 65 deletions swarms/agents/agent.py
@@ -8,8 +8,7 @@
 from langchain.chat_models.base import BaseChatModel
 from langchain.memory import ChatMessageHistory
 from langchain.prompts.chat import (
-    BaseChatPromptTemplate,
-)
+    BaseChatPromptTemplate,)
 from langchain.schema import (
     BaseChatMessageHistory,
     Document,
@@ -34,7 +33,6 @@
 )
 from langchain_experimental.pydantic_v1 import BaseModel, ValidationError
 
-
 # PROMPT
 FINISH_NAME = "finish"
 
@@ -72,14 +70,12 @@ class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):  # type: ignore[misc]
     send_token_limit: int = 4196
 
     def construct_full_prompt(self, goals: List[str]) -> str:
-        prompt_start = (
-            "Your decisions must always be made independently "
-            "without seeking user assistance.\n"
-            "Play to your strengths as an LLM and pursue simple "
-            "strategies with no legal complications.\n"
-            "If you have completed all your tasks, make sure to "
-            'use the "finish" command.'
-        )
+        prompt_start = ("Your decisions must always be made independently "
+                        "without seeking user assistance.\n"
+                        "Play to your strengths as an LLM and pursue simple "
+                        "strategies with no legal complications.\n"
+                        "If you have completed all your tasks, make sure to "
+                        'use the "finish" command.')
         # Construct full prompt
         full_prompt = (
             f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
@@ -91,25 +87,23 @@ def construct_full_prompt(self, goals: List[str]) -> str:
         return full_prompt
 
     def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
-        base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"]))
+        base_prompt = SystemMessage(
+            content=self.construct_full_prompt(kwargs["goals"]))
         time_prompt = SystemMessage(
-            content=f"The current time and date is {time.strftime('%c')}"
-        )
-        used_tokens = self.token_counter(base_prompt.content) + self.token_counter(
-            time_prompt.content
-        )
+            content=f"The current time and date is {time.strftime('%c')}")
+        used_tokens = self.token_counter(
+            base_prompt.content) + self.token_counter(time_prompt.content)
         memory: VectorStoreRetriever = kwargs["memory"]
         previous_messages = kwargs["messages"]
-        relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
+        relevant_docs = memory.get_relevant_documents(
+            str(previous_messages[-10:]))
         relevant_memory = [d.page_content for d in relevant_docs]
         relevant_memory_tokens = sum(
-            [self.token_counter(doc) for doc in relevant_memory]
-        )
+            [self.token_counter(doc) for doc in relevant_memory])
         while used_tokens + relevant_memory_tokens > 2500:
             relevant_memory = relevant_memory[:-1]
             relevant_memory_tokens = sum(
-                [self.token_counter(doc) for doc in relevant_memory]
-            )
+                [self.token_counter(doc) for doc in relevant_memory])
         content_format = (
             f"This reminds you of these events from your past:\n{relevant_memory}\n\n"
         )
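Aside: the while loop in this hunk is the agent's token budgeting, dropping retrieved memories until the base prompt, time prompt, and memories fit under 2,500 tokens. A self-contained sketch of the same idea; the whitespace-splitting token_counter and the numbers are illustrative stand-ins, not the tokenizer the agent is actually constructed with:

# Sketch only: mirrors the trimming loop in format_messages above.
def token_counter(text: str) -> int:
    return len(text.split())  # stand-in tokenizer, assumption

used_tokens = 2400  # assumed tokens already spent on base + time prompts
relevant_memory = ["first memory " * 20, "second memory " * 20,
                   "third memory " * 20]
relevant_memory_tokens = sum(token_counter(doc) for doc in relevant_memory)

while used_tokens + relevant_memory_tokens > 2500:
    relevant_memory = relevant_memory[:-1]  # drop the last document, as the diff does
    relevant_memory_tokens = sum(token_counter(doc) for doc in relevant_memory)

print(len(relevant_memory), relevant_memory_tokens)  # -> 2 80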
@@ -147,13 +141,23 @@ def __init__(self) -> None:
         self.performance_evaluation: List[str] = []
         self.response_format = {
             "thoughts": {
-                "text": "thought",
-                "reasoning": "reasoning",
-                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
-                "criticism": "constructive self-criticism",
-                "speak": "thoughts summary to say to user",
+                "text":
+                    "thought",
+                "reasoning":
+                    "reasoning",
+                "plan":
+                    "- short bulleted\n- list that conveys\n- long-term plan",
+                "criticism":
+                    "constructive self-criticism",
+                "speak":
+                    "thoughts summary to say to user",
             },
-            "command": {"name": "command name", "args": {"arg name": "value"}},
+            "command": {
+                "name": "command name",
+                "args": {
+                    "arg name": "value"
+                }
+            },
         }
 
     def add_constraint(self, constraint: str) -> None:
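Aside: generate_prompt_string (further down in this file) tells the model to "Ensure the response can be parsed by Python json.loads", so a reply must match the response_format dict above. The sketch below round-trips one such reply; the command name and args are hypothetical examples, not commands defined anywhere in this diff:

# Sketch only: a reply in the shape response_format above describes,
# parsed the way the prompt demands (json.loads). "list_files" and its
# args are illustrative, not part of the repository.
import json

reply = """{
    "thoughts": {
        "text": "I should inspect the workspace first.",
        "reasoning": "Knowing what already exists avoids duplicate work.",
        "plan": "- list files\\n- read notes\\n- draft summary",
        "criticism": "I may be over-planning a simple task.",
        "speak": "I'll check the workspace before writing anything."
    },
    "command": {"name": "list_files", "args": {"directory": "."}}
}"""

action = json.loads(reply)
print(action["command"]["name"], action["command"]["args"])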
@@ -191,7 +195,9 @@ def add_performance_evaluation(self, evaluation: str) -> None:
         """
         self.performance_evaluation.append(evaluation)
 
-    def _generate_numbered_list(self, items: list, item_type: str = "list") -> str:
+    def _generate_numbered_list(self,
+                                items: list,
+                                item_type: str = "list") -> str:
         """
         Generate a numbered list from given items based on the item_type.
@@ -209,16 +215,11 @@ def _generate_numbered_list(self, items: list, item_type: str = "list") -> str:
                 for i, item in enumerate(items)
             ]
             finish_description = (
-                "use this to signal that you have finished all your objectives"
-            )
-            finish_args = (
-                '"response": "final response to let '
-                'people know you have finished your objectives"'
-            )
-            finish_string = (
-                f"{len(items) + 1}. {FINISH_NAME}: "
-                f"{finish_description}, args: {finish_args}"
-            )
+                "use this to signal that you have finished all your objectives")
+            finish_args = ('"response": "final response to let '
+                           'people know you have finished your objectives"')
+            finish_string = (f"{len(items) + 1}. {FINISH_NAME}: "
+                             f"{finish_description}, args: {finish_args}")
             return "\n".join(command_strings + [finish_string])
         else:
             return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))
@@ -239,8 +240,7 @@ def generate_prompt_string(self) -> str:
             f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
             "You should only respond in JSON format as described below "
             f"\nResponse Format: \n{formatted_response_format} "
-            "\nEnsure the response can be parsed by Python json.loads"
-        )
+            "\nEnsure the response can be parsed by Python json.loads")
 
         return prompt_string
 
@@ -261,13 +261,11 @@ def get_prompt(tools: List[BaseTool]) -> str:
     prompt_generator.add_constraint(
         "~16000 word limit for short term memory. "
         "Your short term memory is short, "
-        "so immediately save important information to files."
-    )
+        "so immediately save important information to files.")
     prompt_generator.add_constraint(
         "If you are unsure how you previously did something "
         "or want to recall past events, "
-        "thinking about similar events will help you remember."
-    )
+        "thinking about similar events will help you remember.")
     prompt_generator.add_constraint("No user assistance")
     prompt_generator.add_constraint(
         'Exclusively use the commands listed in double quotes e.g. "command name"'
@@ -279,29 +277,23 @@
 
     # Add resources to the PromptGenerator object
     prompt_generator.add_resource(
-        "Internet access for searches and information gathering."
-    )
+        "Internet access for searches and information gathering.")
     prompt_generator.add_resource("Long Term memory management.")
     prompt_generator.add_resource(
-        "GPT-3.5 powered Agents for delegation of simple tasks."
-    )
+        "GPT-3.5 powered Agents for delegation of simple tasks.")
     prompt_generator.add_resource("File output.")
 
     # Add performance evaluations to the PromptGenerator object
     prompt_generator.add_performance_evaluation(
         "Continuously review and analyze your actions "
-        "to ensure you are performing to the best of your abilities."
-    )
+        "to ensure you are performing to the best of your abilities.")
     prompt_generator.add_performance_evaluation(
-        "Constructively self-criticize your big-picture behavior constantly."
-    )
+        "Constructively self-criticize your big-picture behavior constantly.")
     prompt_generator.add_performance_evaluation(
-        "Reflect on past decisions and strategies to refine your approach."
-    )
+        "Reflect on past decisions and strategies to refine your approach.")
     prompt_generator.add_performance_evaluation(
         "Every command has a cost, so be smart and efficient. "
-        "Aim to complete tasks in the least number of steps."
-    )
+        "Aim to complete tasks in the least number of steps.")
 
     # Generate the prompt string
     prompt_string = prompt_generator.generate_prompt_string()
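Aside: get_prompt wires every constraint, resource, and performance evaluation into one system prompt string. A minimal sketch of calling it; the import path is an assumption based on the file shown here (swarms/agents/agent.py), and an empty tool list keeps the example small:

# Sketch only: prints the assembled AutoGPT-style system prompt.
# Assumption: get_prompt is importable from this module; nothing in
# the diff confirms the package's public exports.
from swarms.agents.agent import get_prompt

print(get_prompt(tools=[]))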
@@ -372,10 +364,8 @@ def from_llm_and_tools(
     )
 
     def run(self, goals: List[str]) -> str:
-        user_input = (
-            "Determine which next command to use, "
-            "and respond using the format specified above:"
-        )
+        user_input = ("Determine which next command to use, "
+                      "and respond using the format specified above:")
         # Interaction Loop
         loop_count = 0
         while True:
@@ -392,8 +382,10 @@ def run(self, goals: List[str]) -> str:
 
             # Print Assistant thoughts
             print(assistant_reply)
-            self.chat_history_memory.add_message(HumanMessage(content=user_input))
-            self.chat_history_memory.add_message(AIMessage(content=assistant_reply))
+            self.chat_history_memory.add_message(
+                HumanMessage(content=user_input))
+            self.chat_history_memory.add_message(
+                AIMessage(content=assistant_reply))
 
             # Get command name and arguments
             action = self.output_parser.parse(assistant_reply)
@@ -419,8 +411,7 @@ def run(self, goals: List[str]) -> str:
                 result = (
                     f"Unknown command '{action.name}'. "
                     "Please refer to the 'COMMANDS' list for available "
-                    "commands and only respond in the specified JSON format."
-                )
+                    "commands and only respond in the specified JSON format.")
 
             memory_to_add = f"Assistant Reply: {assistant_reply} \nResult: {result} "
             if self.feedback_tool is not None:
54 changes: 27 additions & 27 deletions swarms/agents/aot.py
@@ -4,13 +4,13 @@
 
 import openai_model
 
-logging.basicConfig(
-    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
-)
+logging.basicConfig(level=logging.INFO,
+                    format="%(asctime)s - %(levelname)s - %(message)s")
 logger = logging.getLogger(__name__)
 
 
 class OpenAI:
+
     def __init__(
         self,
         api_key,
@@ -68,16 +68,13 @@ def run(self, prompt, max_tokens, temperature, k=1, stop=None):
                 temperature=temperature,
             )
             with open("openai.logs", "a") as log_file:
-                log_file.write(
-                    "\n" + "-----------" + "\n" + "Prompt : " + prompt + "\n"
-                )
+                log_file.write("\n" + "-----------" + "\n" + "Prompt : " +
+                               prompt + "\n")
             return response
         except openai_model.error.RateLimitError as e:
             sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
-            print(
-                f"{str(e)}, sleep for {sleep_duratoin}s, set it by env"
-                " OPENAI_RATE_TIMEOUT"
-            )
+            print(f"{str(e)}, sleep for {sleep_duratoin}s, set it by env"
+                  " OPENAI_RATE_TIMEOUT")
             time.sleep(sleep_duratoin)
 
     def openai_choice2text_handler(self, choice):
@@ -100,11 +97,16 @@ def generate_text(self, prompt, k):
         else:
             response = self.run(prompt, 300, 0.5, k)
             thoughts = [
-                self.openai_choice2text_handler(choice) for choice in response.choices
+                self.openai_choice2text_handler(choice)
+                for choice in response.choices
             ]
             return thoughts
 
-    def generate_thoughts(self, state, k, initial_prompt, rejected_solutions=None):
+    def generate_thoughts(self,
+                          state,
+                          k,
+                          initial_prompt,
+                          rejected_solutions=None):
         if isinstance(state, str):
             pass
         else:
@@ -177,7 +179,8 @@ def evaluate_states(self, states, initial_prompt):
                 """
                 response = self.run(prompt, 10, 1)
                 try:
-                    value_text = self.openai_choice2text_handler(response.choices[0])
+                    value_text = self.openai_choice2text_handler(
+                        response.choices[0])
                     # print(f'state: {value_text}')
                     value = float(value_text)
                     print(f"Evaluated Thought Value: {value}")
@@ -187,10 +190,12 @@
             return state_values
 
         else:
-            raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")
+            raise ValueError(
+                "Invalid evaluation strategy. Choose 'value' or 'vote'.")
 
 
 class AoTAgent:
+
     def __init__(
         self,
         num_thoughts: int = None,
@@ -222,7 +227,8 @@ def solve(self):
                 return None
 
             best_state, _ = max(self.output, key=lambda x: x[1])
-            solution = self.model.generate_solution(self.initial_prompt, best_state)
+            solution = self.model.generate_solution(self.initial_prompt,
+                                                    best_state)
             print(f"Solution is {solution}")
             return solution if solution else best_state
         except Exception as error:
@@ -239,11 +245,8 @@ def dfs(self, state, step):
         for next_state in thoughts:
             state_value = self.evaluated_thoughts[next_state]
             if state_value > self.value_threshold:
-                child = (
-                    (state, next_state)
-                    if isinstance(state, str)
-                    else (*state, next_state)
-                )
+                child = ((state, next_state) if isinstance(state, str) else
+                         (*state, next_state))
                 self.dfs(child, step + 1)
 
         # backtracking
@@ -253,17 +256,14 @@ def dfs(self, state, step):
                 continue
 
     def generate_and_filter_thoughts(self, state):
-        thoughts = self.model.generate_thoughts(
-            state, self.num_thoughts, self.initial_prompt
-        )
+        thoughts = self.model.generate_thoughts(state, self.num_thoughts,
+                                                self.initial_prompt)
 
         self.evaluated_thoughts = self.model.evaluate_states(
-            thoughts, self.initial_prompt
-        )
+            thoughts, self.initial_prompt)
 
         filtered_thoughts = [
-            thought
-            for thought in thoughts
+            thought for thought in thoughts
             if self.evaluated_thoughts[thought] >= self.pruning_threshold
         ]
         print(f"filtered_thoughts: {filtered_thoughts}")
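Aside: dfs and generate_and_filter_thoughts together implement a pruned depth-first search over chains of "thoughts": a child is only expanded when its evaluated score clears value_threshold, and states accumulate as growing tuples. A self-contained sketch of that search; the toy generator, evaluator, and thresholds below are illustrative stand-ins for the OpenAI-backed calls:

# Sketch only: the pruned DFS of AoTAgent.dfs with toy stand-ins for
# generate_and_filter_thoughts. The child-tuple construction and the
# threshold test mirror the diff; everything else is an assumption.
def toy_thoughts(state):
    base = state if isinstance(state, str) else state[-1]
    return [base + "a", base + "b"]

def toy_value(state):
    leaf = state if isinstance(state, str) else state[-1]
    return len(leaf) / 3.0  # pretend longer thought chains score higher

def dfs(state, step, output, max_steps=2, value_threshold=0.5):
    if step > max_steps:
        output.append((state, toy_value(state)))  # record a finished chain
        return
    for next_state in toy_thoughts(state):
        if toy_value(next_state) > value_threshold:
            # same child construction as the diff: grow a tuple of states
            child = ((state, next_state) if isinstance(state, str) else
                     (*state, next_state))
            dfs(child, step + 1, output, max_steps, value_threshold)

output = []
dfs("x", 1, output)
best_state, best_value = max(output, key=lambda x: x[1])  # as solve() does
print(best_state, best_value)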
