Skip to content

Commit

Permalink
groupchat
Browse files Browse the repository at this point in the history
  • Loading branch information
kyegomez committed Nov 4, 2023
1 parent 7d888c6 commit 154f50c
Show file tree
Hide file tree
Showing 3 changed files with 241 additions and 186 deletions.
136 changes: 38 additions & 98 deletions groupchat.py
Original file line number Diff line number Diff line change
@@ -1,109 +1,49 @@
# from swarms.structs import Flow
# from swarms.models import OpenAIChat
# from swarms.swarms.groupchat import GroupChat
# from swarms.agents import SimpleAgent
from swarms import OpenAI, Flow
from swarms.swarms.groupchat import GroupChatManager, GroupChat

# api_key = ""

# llm = OpenAIChat(
# openai_api_key=api_key,
# )
api_key = ""

# agent1 = SimpleAgent("Captain Price", Flow(llm=llm, max_loops=4))
# agent2 = SimpleAgent("John Mactavis", Flow(llm=llm, max_loops=4))

# # Create a groupchat with the 2 agents
# chat = GroupChat([agent1, agent2])

# # Assign duties to the agents
# chat.assign_duty(agent1.name, "Buy the groceries")
# chat.assign_duty(agent2.name, "Clean the house")

# # Initate a chat
# response = chat.run("Captain Price", "Hello, how are you John?")
# print(response)


from swarms.models import OpenAIChat
from swarms.structs import Flow
import random

api_key = "" # Your API Key here


class GroupChat:
    """
    GroupChat class that facilitates agent-to-agent communication using
    multiple instances of the Flow class.

    Agents are keyed as "agent_0", "agent_1", ... in insertion order.
    """

    def __init__(self, agents: list):
        # Map auto-generated positional ids to the agent objects.
        self.agents = {f"agent_{i}": agent for i, agent in enumerate(agents)}
        # Chronological log of every formatted message sent through the chat.
        self.message_log = []

    def add_agent(self, agent: "Flow"):
        """Register *agent* under the next sequential id.

        NOTE(review): after a remove_agent() call, len(self.agents) can
        collide with an existing id and silently overwrite that agent —
        TODO confirm whether remove-then-add is a supported sequence.
        """
        agent_id = f"agent_{len(self.agents)}"
        self.agents[agent_id] = agent

    def remove_agent(self, agent_id: str):
        """Remove the agent registered under *agent_id*; unknown ids are ignored."""
        if agent_id in self.agents:
            del self.agents[agent_id]

    def send_message(self, sender_id: str, recipient_id: str, message: str):
        """Deliver *message* from sender to recipient, logging it first.

        Raises:
            ValueError: if either id is not a registered agent.
        """
        if sender_id not in self.agents or recipient_id not in self.agents:
            raise ValueError("Invalid sender or recipient ID.")
        formatted_message = f"{sender_id} to {recipient_id}: {message}"
        self.message_log.append(formatted_message)
        recipient_agent = self.agents[recipient_id]
        recipient_agent.run(message)

    def broadcast_message(self, sender_id: str, message: str):
        """Send *message* from *sender_id* to every other registered agent."""
        # Iterate keys only — the agent objects themselves are not needed here
        # (the original iterated .items() and discarded the value).
        for agent_id in self.agents:
            if agent_id != sender_id:
                self.send_message(sender_id, agent_id, message)

    def get_message_log(self):
        """Return the chronological list of formatted messages."""
        return self.message_log


class EnhancedGroupChatV2(GroupChat):
    """GroupChat variant that can run scripted multi-round conversations
    between randomly chosen agent pairs."""

    def __init__(self, agents: list):
        super().__init__(agents)

    def multi_round_conversation(self, rounds: int = 5):
        """
        Initiate a multi-round conversation between agents.

        Each round picks a random sender and a different random recipient,
        then sends a generic greeting between them.

        Args:
            rounds (int): The number of rounds of conversation.

        Raises:
            ValueError: if fewer than two agents are registered. (The
                original retry loop `while recipient_id == sender_id`
                never terminated with a single agent.)
        """
        if len(self.agents) < 2:
            raise ValueError("At least two agents are required for a conversation.")
        for _ in range(rounds):
            # random.sample guarantees two distinct ids, replacing the
            # original pick-and-retry loop with the same uniform pairing.
            sender_id, recipient_id = random.sample(list(self.agents.keys()), 2)
            # Generate a message (for simplicity, a generic message is used)
            message = f"Hello {recipient_id}, how are you today?"
            self.send_message(sender_id, recipient_id, message)


# Sample usage with EnhancedGroupChatV2
# Initialize the language model
llm = OpenAIChat(
llm = OpenAI(
openai_api_key=api_key,
temperature=0.5,
max_tokens=3000,
)

# Initialize two Flow agents
agent1 = Flow(llm=llm, max_loops=5, dashboard=True)
agent2 = Flow(llm=llm, max_loops=5, dashboard=True)
# Initialize the flow
flow1 = Flow(
llm=llm,
max_loops=1,
system_message="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE",
name='silly',
dashboard=True,
)
flow2 = Flow(
llm=llm,
max_loops=1,
system_message="YOU ARE VERY SMART AND ANSWER RIDDLES",
name='detective',
dashboard=True,
)
flow3 = Flow(
llm=llm,
max_loops=1,
system_message="YOU MAKE RIDDLES",
name='riddler',
dashboard=True,
)
manager = Flow(
llm=llm,
max_loops=1,
system_message="YOU ARE A GROUP CHAT MANAGER",
name='manager',
dashboard=True,
)

# Create an enhanced group chat with the two agents
enhanced_group_chat_v2 = EnhancedGroupChatV2(agents=[agent1, agent2])

# Simulate multi-round agent to agent communication
enhanced_group_chat_v2.multi_round_conversation(rounds=5)
# Example usage:
agents = [flow1, flow2, flow3]

enhanced_group_chat_v2.get_message_log() # Get the conversation log
group_chat = GroupChat(agents=agents, messages=[], max_round=10)
chat_manager = GroupChatManager(groupchat=group_chat, selector = manager)
chat_history = chat_manager("Write me a riddle")
106 changes: 101 additions & 5 deletions swarms/structs/flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
This will enable you to leave the flow loop.
"""


# Custom stopping condition
def stop_when_repeats(response: str) -> bool:
# Stop if the word stop appears in the response
Expand Down Expand Up @@ -100,6 +101,8 @@ def __init__(
retry_interval: int = 1,
interactive: bool = False,
dashboard: bool = False,
name: str = "Flow agent",
system_message: str = FLOW_SYSTEM_PROMPT,
# tools: List[BaseTool] = None,
dynamic_temperature: bool = False,
**kwargs: Any,
Expand All @@ -119,6 +122,8 @@ def __init__(
self.dashboard = dashboard
self.dynamic_temperature = dynamic_temperature
# self.tools = tools
self.system_message = system_message
self.name = name

def provide_feedback(self, feedback: str) -> None:
"""Allow users to provide feedback on the responses."""
Expand All @@ -131,11 +136,6 @@ def _check_stopping_condition(self, response: str) -> bool:
return self.stopping_condition(response)
return False

def __call__(self, prompt, **kwargs) -> str:
    """Invoke the flow: run the underlying LLM on *prompt* and return its reply."""
    return self.llm(prompt, **kwargs)

def dynamic_temperature(self):
"""
1. Check the self.llm object for the temperature
Expand Down Expand Up @@ -282,6 +282,82 @@ def run(self, task: str, save: bool = True, **kwargs):

return response # , history

def __call__(self, task: str, save: bool = True, **kwargs):
    """
    Run the autonomous agent loop.

    Args:
        task (str): The initial task to run.
        save (bool): Whether to persist state to "flow_history.json"
            after the loop finishes.
        **kwargs: Extra keyword arguments forwarded to the LLM call.

    Flow:
        1. Generate a response
        2. Check stopping condition
        3. If stopping condition is met, stop
        4. If stopping condition is not met, generate a response
        5. Repeat until stopping condition is met or max_loops is reached

    Returns:
        The last response produced (or *task* itself if no loop ran).

    Example:
        >>> out = flow("Generate a 10,000 word blog on health and wellness.")
    """
    # Start with a new history or continue from the last saved state.
    # BUG FIX: the original rebuilt `history = [f"Human: {task}"]`
    # unconditionally right after this branch, which made the restored
    # memory state dead code.
    if not self.memory or not self.memory[-1]:
        history = [f"Human: {task}"]
    else:
        history = self.memory[-1]

    response = task

    # If dashboard = True then print the dashboard
    if self.dashboard:
        self.print_dashboard(task)

    # Start or continue the loop process
    for i in range(len(history), self.max_loops):
        print(colored(f"\nLoop {i+1} of {self.max_loops}", "blue"))
        print("\n")
        response = history[-1].split(": ", 1)[-1]  # Get the last response

        if self._check_stopping_condition(response) or parse_done_token(response):
            break

        # Adjust temperature, comment if no work
        # NOTE(review): __init__ assigns a bool to self.dynamic_temperature,
        # shadowing the method of the same name — calling it here will fail
        # when the flag is True. Needs a rename upstream; preserved as-is.
        if self.dynamic_temperature:
            self.dynamic_temperature()

        attempt = 0
        while attempt < self.retry_attempts:
            try:
                # BUG FIX: the original wrote `) ** kwargs,` which
                # exponentiates the prompt by the kwargs dict (TypeError)
                # instead of unpacking keyword arguments into the LLM call.
                response = self.llm(
                    self.agent_history_prompt(FLOW_SYSTEM_PROMPT, response),
                    **kwargs,
                )
                if self.interactive:
                    print(f"AI: {response}")
                    history.append(f"AI: {response}")
                    response = input("You: ")
                    history.append(f"Human: {response}")
                else:
                    print(f"AI: {response}")
                    history.append(f"AI: {response}")
                    print(response)
                break
            except Exception as e:
                logging.error(f"Error generating response: {e}")
                attempt += 1
                time.sleep(self.retry_interval)
        history.append(response)
        time.sleep(self.loop_interval)
    self.memory.append(history)

    if save:
        self.save_state("flow_history.json")

    return response  # , history

def _run(self, **kwargs: Any) -> str:
"""Generate a result using the provided keyword args."""
task = self.format_prompt(**kwargs)
Expand All @@ -304,6 +380,7 @@ def agent_history_prompt(
Returns:
str: The agent history prompt
"""
system_prompt = system_prompt or self.system_message
agent_history_prompt = f"""
SYSTEM_PROMPT: {system_prompt}
Expand Down Expand Up @@ -608,3 +685,22 @@ def retry_on_failure(self, function, retries: int = 3, retry_delay: int = 1):
attempt += 1
time.sleep(retry_delay)
raise Exception("All retry attempts failed")

def generate_reply(self, history: str, **kwargs) -> dict:
    """
    Generate a reply to the conversation *history* in this agent's role.

    Args:
        history (str): The conversation so far, rendered as text.
        **kwargs: Extra keyword arguments forwarded to the LLM call.

    Returns:
        dict: ``{"role": self.name, "content": <llm response>}``.
        BUG FIX: the original annotated the return as ``-> str`` but
        always returned this dict.
    """
    prompt = f"""
    SYSTEM_PROMPT: {self.system_message}

    History: {history}

    Your response:
    """
    response = self.llm(prompt, **kwargs)
    return {"role": self.name, "content": response}

def update_system_message(self, system_message: str) -> None:
    """Update the system message used when building this agent's prompts."""
    self.system_message = system_message
Loading

0 comments on commit 154f50c

Please sign in to comment.