
Commit

[CODE QUALITY]
kyegomez committed Nov 29, 2023
1 parent 87e8b90 commit dc55006
Showing 158 changed files with 3,240 additions and 1,320 deletions.
8 changes: 5 additions & 3 deletions playground/agents/mm_agent_example.py
@@ -4,14 +4,16 @@

node = MultiModalAgent(load_dict)

text = node.run_text("What is your name? Generate a picture of yourself")
text = node.run_text(
"What is your name? Generate a picture of yourself"
)

img = node.run_img("/image1", "What is this image about?")

chat = node.chat(
(
"What is your name? Generate a picture of yourself. What is this image"
" about?"
"What is your name? Generate a picture of yourself. What is"
" this image about?"
),
streaming=True,
)
6 changes: 4 additions & 2 deletions playground/demos/accountant_team/account_team2.py
@@ -53,7 +53,8 @@
pdf_path = "bankstatement.pdf"
fraud_detection_instructions = "Detect fraud in the document"
summary_agent_instructions = (
"Generate an actionable summary of the document with action steps to take"
"Generate an actionable summary of the document with action steps"
" to take"
)
decision_making_support_agent_instructions = (
"Provide decision making support to the business owner:"
@@ -77,5 +78,6 @@

# Provide decision making support to the accountant
decision_making_support_agent_output = decision_making_support_agent.run(
f"{decision_making_support_agent_instructions}: {summary_agent_output}"
f"{decision_making_support_agent_instructions}:"
f" {summary_agent_output}"
)
11 changes: 8 additions & 3 deletions playground/demos/accountant_team/accountant_team.py
@@ -81,7 +81,9 @@ def __init__(
super().__init__()
self.pdf_path = pdf_path
self.list_pdfs = list_pdfs
self.fraud_detection_instructions = fraud_detection_instructions
self.fraud_detection_instructions = (
fraud_detection_instructions
)
self.summary_agent_instructions = summary_agent_instructions
self.decision_making_support_agent_instructions = (
decision_making_support_agent_instructions
@@ -98,7 +100,8 @@ def run(self):

# Generate an actionable summary of the document
summary_agent_output = summary_generator_agent.run(
f"{self.summary_agent_instructions}: {fraud_detection_agent_output}"
f"{self.summary_agent_instructions}:"
f" {fraud_detection_agent_output}"
)

# Provide decision making support to the accountant
@@ -113,7 +116,9 @@ def run(self):
swarm = AccountantSwarms(
pdf_path="tesla.pdf",
fraud_detection_instructions="Detect fraud in the document",
summary_agent_instructions="Generate an actionable summary of the document",
summary_agent_instructions=(
"Generate an actionable summary of the document"
),
decision_making_support_agent_instructions=(
"Provide decision making support to the business owner:"
),
29 changes: 29 additions & 0 deletions playground/demos/ad_gen/ad_gen.py
@@ -30,15 +30,34 @@ def __init__(self, product_name):
"in a luxurious setting", "in a playful and colorful background", "in an ice cave setting",
"in a serene and calm landscape"
]
self.contexts = [
"high realism product ad (extremely creative)"
]

def generate_concept(self):
theme = random.choice(self.themes)
context = random.choice(self.contexts)
return f"An ad for {self.product_name} that embodies a {theme} theme {context}"


# User input
product_name = input(
"Enter a product name for ad creation (e.g., 'PS5', 'AirPods',"
" 'Kirkland Vodka'): "
)

# Generate creative concept
concept_generator = ProductAdConceptGenerator(product_name)
@@ -53,6 +72,16 @@ def generate_concept(self):
ad_copy = ad_copy_agent.run(task=ad_copy_prompt)

# Output the results
print("Creative Concept:", creative_concept)
print("Ad Copy:", ad_copy)
print(
"Image Path:",
image_paths[0] if image_paths else "No image generated",
)
8 changes: 5 additions & 3 deletions playground/demos/ai_research_team/main.py
@@ -48,7 +48,9 @@

paper = pdf_to_text(PDF_PATH)
algorithmic_psuedocode_agent = paper_summarizer_agent.run(
"Focus on creating the algorithmic pseudocode for the novel method in this"
f" paper: {paper}"
"Focus on creating the algorithmic pseudocode for the novel"
f" method in this paper: {paper}"
)
pytorch_code = paper_implementor_agent.run(
algorithmic_psuedocode_agent
)
pytorch_code = paper_implementor_agent.run(algorithmic_psuedocode_agent)
8 changes: 4 additions & 4 deletions playground/demos/assembly/assembly.py
@@ -7,10 +7,10 @@
llm = GPT4VisionAPI()

task = (
"Analyze this image of an assembly line and identify any issues such as"
" misaligned parts, defects, or deviations from the standard assembly"
" process. IF there is anything unsafe in the image, explain why it is"
" unsafe and how it could be improved."
"Analyze this image of an assembly line and identify any issues"
" such as misaligned parts, defects, or deviations from the"
" standard assembly process. IF there is anything unsafe in the"
" image, explain why it is unsafe and how it could be improved."
)
img = "assembly_line.jpg"

21 changes: 16 additions & 5 deletions playground/demos/autobloggen.py
@@ -9,7 +9,8 @@

# Prompts
topic_selection_task = (
"Generate 10 topics on gaining mental clarity using ancient practices"
"Generate 10 topics on gaining mental clarity using ancient"
" practices"
)


@@ -54,7 +55,9 @@ def __init__(
):
self.llm = llm()
self.topic_selection_task = topic_selection_task
self.topic_selection_agent_prompt = topic_selection_agent_prompt
self.topic_selection_agent_prompt = (
topic_selection_agent_prompt
)
self.objective = objective
self.iterations = iterations
self.max_retries = max_retries
@@ -90,7 +93,9 @@ def get_review_prompt(self, article: str):

def step(self):
"""Steps through the task"""
topic_selection_agent = self.llm(self.topic_selection_agent_prompt)
topic_selection_agent = self.llm(
self.topic_selection_agent_prompt
)
topic_selection_agent = self.print_beautifully(
"Topic Selection Agent", topic_selection_agent
)
@@ -100,7 +105,9 @@ def step(self):

# Agent that reviews the draft
review_agent = self.llm(self.get_review_prompt(draft_blog))
review_agent = self.print_beautifully("Review Agent", review_agent)
review_agent = self.print_beautifully(
"Review Agent", review_agent
)

# Agent that publishes on social media
distribution_agent = self.llm(
@@ -119,7 +126,11 @@ def run(self):
except Exception as error:
print(
colored(
f"Error while running AutoBlogGenSwarm {error}", "red"
(
"Error while running AutoBlogGenSwarm"
f" {error}"
),
"red",
)
)
if attempt == self.retry_attempts - 1:
6 changes: 5 additions & 1 deletion playground/demos/autotemp/autotemp.py
@@ -47,7 +47,11 @@ def evaluate_output(self, output, temperature):
"""
score_text = self.llm(eval_prompt, temperature=0.5)
score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
return round(float(score_match.group()), 1) if score_match else 0.0
return (
round(float(score_match.group()), 1)
if score_match
else 0.0
)

def run(self, prompt, temperature_string):
print("Starting generation process...")
24 changes: 17 additions & 7 deletions playground/demos/autotemp/blog_gen.py
@@ -12,7 +12,9 @@ def __init__(
blog_topic,
temperature_range: str = "0.4,0.6,0.8,1.0,1.2",
): # Add blog_topic as an argument
self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8)
self.openai_chat = OpenAIChat(
openai_api_key=api_key, temperature=0.8
)
self.auto_temp = AutoTemp(api_key)
self.temperature_range = temperature_range
self.workflow = SequentialWorkflow(max_loops=5)
@@ -52,11 +54,15 @@ def run_workflow(self):
)

chosen_topic = topic_output.split("\n")[0]
print(colored("Selected topic: " + chosen_topic, "yellow"))
print(
colored("Selected topic: " + chosen_topic, "yellow")
)

# Initial draft generation with AutoTemp
initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
"{{CHOSEN_TOPIC}}", chosen_topic
initial_draft_prompt = (
self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
"{{CHOSEN_TOPIC}}", chosen_topic
)
)
auto_temp_output = self.auto_temp.run(
initial_draft_prompt, self.temperature_range
@@ -89,13 +95,17 @@ def run_workflow(self):
)

# Distribution preparation using OpenAIChat
distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace(
"{{ARTICLE_TOPIC}}", chosen_topic
distribution_prompt = (
self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace(
"{{ARTICLE_TOPIC}}", chosen_topic
)
)
distribution_result = self.openai_chat.generate(
[distribution_prompt]
)
distribution_output = distribution_result.generations[0][0].text
distribution_output = distribution_result.generations[0][
0
].text
print(
colored(
(
4 changes: 3 additions & 1 deletion playground/demos/autotemp/blog_gen_example.py
@@ -5,7 +5,9 @@
def main():
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError("OPENAI_API_KEY environment variable not set.")
raise ValueError(
"OPENAI_API_KEY environment variable not set."
)

blog_topic = input("Enter the topic for the blog generation: ")

14 changes: 10 additions & 4 deletions playground/demos/developer_swarm/main.py
@@ -37,12 +37,18 @@

# Documentation agent
documentation_agent = Agent(
llm=llm, sop=DOCUMENTATION_SOP, max_loops=1,
llm=llm,
sop=DOCUMENTATION_SOP,
max_loops=1,
)


# Tests agent
tests_agent = Agent(llm=llm, sop=TEST_SOP, max_loops=2,)
tests_agent = Agent(
llm=llm,
sop=TEST_SOP,
max_loops=2,
)


# Run the documentation agent
@@ -52,6 +58,6 @@

# Run the tests agent
tests = tests_agent.run(
f"Write tests for the following code:{TASK} here is the documentation:"
f" {documentation}"
f"Write tests for the following code:{TASK} here is the"
f" documentation: {documentation}"
)
32 changes: 20 additions & 12 deletions playground/demos/nutrition/nutrition.py
@@ -12,14 +12,15 @@
# Define prompts for various tasks
MEAL_PLAN_PROMPT = (
"Based on the following user preferences: dietary restrictions as"
" vegetarian, preferred cuisines as Italian and Indian, a total caloric"
" intake of around 2000 calories per day, and an exclusion of legumes,"
" create a detailed weekly meal plan. Include a variety of meals for"
" breakfast, lunch, dinner, and optional snacks."
" vegetarian, preferred cuisines as Italian and Indian, a total"
" caloric intake of around 2000 calories per day, and an"
" exclusion of legumes, create a detailed weekly meal plan."
" Include a variety of meals for breakfast, lunch, dinner, and"
" optional snacks."
)
IMAGE_ANALYSIS_PROMPT = (
"Identify the items in this fridge, including their quantities and"
" condition."
"Identify the items in this fridge, including their quantities"
" and condition."
)


@@ -74,12 +75,15 @@ def generate_integrated_shopping_list(
meal_plan_output, image_analysis, user_preferences
):
# Prepare the prompt for the LLM
fridge_contents = image_analysis["choices"][0]["message"]["content"]
fridge_contents = image_analysis["choices"][0]["message"][
"content"
]
prompt = (
f"Based on this meal plan: {meal_plan_output}, and the following items"
f" in the fridge: {fridge_contents}, considering dietary preferences as"
" vegetarian with a preference for Italian and Indian cuisines,"
" generate a comprehensive shopping list that includes only the items"
f"Based on this meal plan: {meal_plan_output}, and the"
f" following items in the fridge: {fridge_contents},"
" considering dietary preferences as vegetarian with a"
" preference for Italian and Indian cuisines, generate a"
" comprehensive shopping list that includes only the items"
" needed."
)

@@ -124,6 +128,10 @@ def generate_integrated_shopping_list(

with open("nutrition_output.txt", "w") as file:
file.write("Meal Plan:\n" + meal_plan_output + "\n\n")
file.write("Integrated Shopping List:\n" + integrated_shopping_list + "\n")
file.write(
"Integrated Shopping List:\n"
+ integrated_shopping_list
+ "\n"
)

print("Outputs have been saved to nutrition_output.txt")
11 changes: 7 additions & 4 deletions playground/demos/positive_med/positive_med.py
@@ -41,7 +41,9 @@ def get_review_prompt(article):
return prompt


def social_media_prompt(article: str, goal: str = "Clicks and engagement"):
def social_media_prompt(
article: str, goal: str = "Clicks and engagement"
):
prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace(
"{{ARTICLE}}", article
).replace("{{GOAL}}", goal)
@@ -50,11 +52,12 @@ def social_media_prompt(article: str, goal: str = "Clicks and engagement"):

# Agent that generates topics
topic_selection_task = (
"Generate 10 topics on gaining mental clarity using ancient practices"
"Generate 10 topics on gaining mental clarity using ancient"
" practices"
)
topics = llm(
f"Your System Instructions: {TOPIC_GENERATOR_SYSTEM_PROMPT}, Your current"
f" task: {topic_selection_task}"
f"Your System Instructions: {TOPIC_GENERATOR_SYSTEM_PROMPT}, Your"
f" current task: {topic_selection_task}"
)

dashboard = print(