Generate the roles and role description #247

Merged · 40 commits · Sep 9, 2023
Changes from 2 commits
Commits (40)
b86eb2b
Generate the roles and role description
Appointat Aug 13, 2023
24b2f0b
Add newline at end of file
Appointat Aug 13, 2023
c1562d3
Add a test for the role_assignment AI agent
Appointat Aug 15, 2023
5f79d38
Update according to the comments
Appointat Aug 15, 2023
d5d05d5
Merge branch 'master' into feature/role-generation
Appointat Aug 15, 2023
022f623
Update camel/agents/role_assignment.py
Appointat Aug 18, 2023
864f59a
Update role_assignment.py
Appointat Aug 18, 2023
fbc0575
Merge branch 'feature/role-generation' of https://github.com/camel-ai…
Appointat Aug 18, 2023
a0bf2c0
Update camel/agents/role_assignment.py
Appointat Aug 18, 2023
b702c77
Update camel/agents/role_assignment.py
Appointat Aug 18, 2023
04bdc24
Update camel/agents/role_assignment.py
Appointat Aug 18, 2023
212a3f2
Update camel/agents/role_assignment.py
Appointat Aug 18, 2023
83999e7
Update
Appointat Aug 18, 2023
07adeb0
Create the new TaskType.ROLE_DESCRIPTION
Appointat Aug 18, 2023
98bf40a
Add examples and tests for the new TaskType
Appointat Aug 18, 2023
7291e76
Update
Appointat Aug 18, 2023
a1fd3cc
Update
Appointat Aug 18, 2023
ee5f825
Update
Appointat Aug 18, 2023
3ab14d7
Update
Appointat Aug 20, 2023
e453098
Rename to role_assignment_agent
Appointat Aug 21, 2023
473b8a4
Update the branch feature/role-generation from master (#265)
Appointat Aug 30, 2023
84b45b2
Update the branch feature/role-generation from master (#266)
Appointat Aug 30, 2023
5509e54
Merge remote-tracking branch 'origin/master' into feature/role-genera…
Appointat Aug 30, 2023
dec6340
Update camel/societies/role_playing.py
Appointat Aug 31, 2023
6bdfee3
Update camel/agents/role_assignment_agent.py
Appointat Aug 31, 2023
bbb9d5f
Update
Appointat Aug 31, 2023
6581d3b
Merge branch 'feature/role-generation' of https://github.com/camel-ai…
Appointat Aug 31, 2023
b760a7d
Update camel/prompts/role_description_prompt_template.py
Appointat Aug 31, 2023
cdef22f
Update
Appointat Aug 31, 2023
e5ac923
Merge branch 'feature/role-generation'
Appointat Aug 31, 2023
2eccbb0
Update
Appointat Aug 31, 2023
982523a
Update
Appointat Aug 31, 2023
c01db69
Update
Appointat Aug 31, 2023
47d08a9
Update
Appointat Aug 31, 2023
6531ab4
Update
Appointat Sep 1, 2023
daab66c
Remove role description specific code in role playing
lightaime Sep 8, 2023
6eb8007
Improve test
lightaime Sep 8, 2023
108366b
Improve init and import
lightaime Sep 8, 2023
f5024b2
Merge branch 'master' into feature/role-generation
lightaime Sep 8, 2023
e98ad5c
Merge branch 'master' into feature/role-generation
lightaime Sep 8, 2023
4 changes: 4 additions & 0 deletions camel/agents/chat_agent.py
@@ -122,6 +122,8 @@ class ChatAgent(BaseAgent):
agent. (default: :obj:`None`)
function_list (Optional[List[OpenAIFunction]]): List of available
:obj:`OpenAIFunction`. (default: :obj:`None`)
        role_description (str, optional): The description of the role that
            the agent plays. (default: :obj:`None`)
"""

def __init__(
@@ -132,12 +134,14 @@ def __init__(
message_window_size: Optional[int] = None,
output_language: Optional[str] = None,
function_list: Optional[List[OpenAIFunction]] = None,
role_description: Optional[str] = None,
) -> None:

self.orig_sys_message: BaseMessage = system_message
self.system_message = system_message
self.role_name: str = system_message.role_name
self.role_type: RoleType = system_message.role_type
self.role_description: Optional[str] = role_description
self.output_language: Optional[str] = output_language
if self.output_language is not None:
self.set_output_language(self.output_language)
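
A minimal sketch of the new parameter in use (illustrative only; the role name and message content are made up):

from camel.agents import ChatAgent
from camel.messages import BaseMessage
from camel.typing import RoleType

sys_msg = BaseMessage(
    role_name="Python Programmer",
    role_type=RoleType.ASSISTANT,
    meta_dict=None,
    content="You write clean, tested Python code.",
)
# The description is stored on the agent as self.role_description.
agent = ChatAgent(sys_msg, role_description="Writes and tests the trading bot code.")
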
125 changes: 125 additions & 0 deletions camel/agents/role_assignment.py
@@ -0,0 +1,125 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
import re
from typing import Any, Dict, List, Optional, Tuple, Union

from tenacity import retry, stop_after_attempt, wait_exponential

from camel.agents import ChatAgent
from camel.messages import BaseMessage
from camel.prompts import TextPrompt
from camel.typing import ModelType, RoleType


class RoleAssignmentAgent(ChatAgent):
    r"""An agent that generates role names based on the task prompt.

    Attributes:
        role_assignment_prompt (TextPrompt): A prompt for the agent to
            generate role names.

    Args:
        model (ModelType): The type of model to use for the agent.
            (default: :obj:`ModelType.GPT_3_5_TURBO`)
        model_config (Any): The configuration for the model.
            (default: :obj:`None`)
    """

def __init__(
self,
model: ModelType = ModelType.GPT_3_5_TURBO,
model_config: Optional[Any] = None,
) -> None:
self.role_assignment_prompt = TextPrompt(
'Given this task, "{task}", generate two role names, ' +
'one for the AI user and one for the AI assistant.')

system_message = BaseMessage(
role_name="Role Assigner",
role_type=RoleType.ASSISTANT,
meta_dict=None,
content="You assign roles based on tasks.",
)
super().__init__(system_message, model, model_config)

    @retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5))
    def run_role_with_description(
        self,
        num_roles: Optional[int] = 2,
        task_prompt: Union[str, TextPrompt] = "",
    ) -> Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]:
        r"""Generate role names based on the input task prompt.

        Args:
            num_roles (int): The number of roles to generate.
                (default: :obj:`2`)
            task_prompt (Union[str, TextPrompt]): The prompt for the task
                based on which the roles are to be generated.

        Returns:
            Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]: A tuple
                containing the generated role names, a dict mapping each role
                name to its description, whether the agent terminated, and
                additional info from the agent response.
        """
self.reset()

expert_prompt = "\n".join(
f"Domain expert {i + 1}: <|blank|>\n"
f"Associated competencies, professional characteristics, duties "
f"and workflows: <|blank|>. End.\n" for i in range(num_roles or 0))
role_assignment_generation_prompt = TextPrompt(
"You are the boss, you need to recruit experts in {num_roles} " +
"different fields to solve the task.\n" +
"Please tell me which domain experts should be recruited, " +
"and what competencies, professional characteristics, duties " +
"and workflows to complete the task.\n" +
"ONLY return the content in BLANK.\n\n" + "===== TASK =====\n" +
"{task}\n\n" + "===== PROMPT =====\n" + expert_prompt)
role_assignment_generation = role_assignment_generation_prompt.format(
num_roles=num_roles, task=task_prompt)

role_assignment_generation_msg = BaseMessage.make_user_message(
role_name="Role Assigner", content=role_assignment_generation)

response_completion = super().step(
input_message=role_assignment_generation_msg)

output_completion = response_completion.msg # type: BaseMessage
terminated = response_completion.terminated
info = response_completion.info

# Distribute the output completions into role names and descriptions
role_names = [
desc.replace("<|", "").replace("|>", "") for desc in re.findall(
r"Domain expert \d: (.+?)\nAssociated competencies,",
output_completion.content,
re.DOTALL,
)
]
role_descriptions = [
desc.replace("<|", "").replace("|>", "") for desc in re.findall(
r"Associated competencies, professional characteristics, "
r"duties and workflows: (.+?) End.", output_completion.content,
re.DOTALL)
]

if len(role_names) != num_roles or len(role_descriptions) != num_roles:
raise RuntimeError("Got None or insufficient Role messages. ")
if terminated:
raise RuntimeError("Role assignment failed.")

role_descriptions_dict = {
role_name: description
for role_name, description in zip(role_names, role_descriptions)
}

return role_names, role_descriptions_dict, terminated, info
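
A minimal usage sketch for the new agent (illustrative; the task text is made up, and an OpenAI API key must be configured since run_role_with_description calls the model):

from camel.agents.role_assignment import RoleAssignmentAgent

agent = RoleAssignmentAgent()
role_names, role_descriptions_dict, terminated, info = (
    agent.run_role_with_description(
        num_roles=2,
        task_prompt="Develop a trading bot for the stock market.",
    ))
# role_names is a list such as ["Financial Analyst", "Software Engineer"];
# role_descriptions_dict maps each generated name to its description.
for name in role_names:
    print(name, "->", role_descriptions_dict[name])
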
5 changes: 5 additions & 0 deletions camel/generators.py
@@ -29,13 +29,16 @@ class SystemMessageGenerator:
sys_msg_meta_dict_keys (Optional[Set[str]], optional): The set of keys
of the meta dictionary used to fill the prompts.
(default: :obj:`None`)
with_role_description (bool, optional): Whether to include the role
description in the system message. (default: :obj:`False`)
"""

def __init__(
self,
task_type: TaskType = TaskType.AI_SOCIETY,
sys_prompts: Optional[Dict[RoleType, str]] = None,
sys_msg_meta_dict_keys: Optional[Set[str]] = None,
with_role_description: bool = False,
) -> None:
self.sys_prompts: Dict[RoleType, str]

@@ -47,10 +50,12 @@ def __init__(
).get_system_prompt(
task_type,
RoleType.ASSISTANT,
with_role_description=with_role_description,
)
user_prompt_template = PromptTemplateGenerator().get_system_prompt(
task_type,
RoleType.USER,
with_role_description=with_role_description,
)
critic_prompt_template = PromptTemplateGenerator(
).get_system_prompt(
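
How the new flag might be used (a sketch; from_dict and its signature are assumed from the existing generator API, and the extra meta-dict keys mirror the role_playing.py changes below):

from camel.generators import SystemMessageGenerator
from camel.typing import RoleType, TaskType

sys_msg_generator = SystemMessageGenerator(
    task_type=TaskType.AI_SOCIETY,
    with_role_description=True,
)
# With the flag enabled, the meta dict must also supply the two descriptions
# referenced by the ROLE_DESCRIPTION_PROMPT template.
assistant_sys_msg = sys_msg_generator.from_dict(
    dict(
        task="Develop a trading bot for the stock market",
        assistant_role="Python Programmer",
        user_role="Stock Trader",
        assistant_description="Writes and tests the trading bot code.",
        user_description="Knows the market and sets the requirements.",
    ),
    role_tuple=("Python Programmer", RoleType.ASSISTANT),
)
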
15 changes: 11 additions & 4 deletions camel/prompts/ai_society.py
@@ -58,8 +58,14 @@ class AISocietyPromptTemplateDict(TextPromptDict):
Please reply with the specified task in {word_limit} words or less. Do not add anything else."""
)

ASSISTANT_PROMPT: TextPrompt = TextPrompt(
"""Never forget you are a {assistant_role} and I am a {user_role}. Never flip roles! Never instruct me!
ROLE_DESCRIPTION_PROMPT = TextPrompt("""===== ROLES WITH DESCRIPTION =====
{user_role} and {assistant_role} are collaborating to complete a task: {task}
{user_role}'s competencies, professional characteristics, duties and workflows to complete the task: {user_description}
{assistant_role}'s competencies, professional characteristics, duties and workflows to complete the task: {assistant_description}
""")

ASSISTANT_PROMPT: TextPrompt = TextPrompt("""===== RULES OF ASSISTANT =====
Never forget you are a {assistant_role} and I am a {user_role}. Never flip roles! Never instruct me!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task.
Here is the task: {task}. Never forget our task!
@@ -75,8 +81,8 @@ class AISocietyPromptTemplateDict(TextPromptDict):
<YOUR_SOLUTION> should be very specific, include detailed explanations and provide preferable detailed implementations and examples and lists for task-solving.
Always end <YOUR_SOLUTION> with: Next request.""")

USER_PROMPT: TextPrompt = TextPrompt(
"""Never forget you are a {user_role} and I am a {assistant_role}. Never flip roles! You will always instruct me.
USER_PROMPT: TextPrompt = TextPrompt("""===== RULES OF USER =====
Never forget you are a {user_role} and I am a {assistant_role}. Never flip roles! You will always instruct me.
We share a common interest in collaborating to successfully complete a task.
I must help you to complete the task.
Here is the task: {task}. Never forget our task!
@@ -115,6 +121,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
"generate_users": self.GENERATE_USERS,
"generate_tasks": self.GENERATE_TASKS,
"task_specify_prompt": self.TASK_SPECIFY_PROMPT,
"role_description": self.ROLE_DESCRIPTION_PROMPT,
RoleType.ASSISTANT: self.ASSISTANT_PROMPT,
RoleType.USER: self.USER_PROMPT,
RoleType.CRITIC: self.CRITIC_PROMPT,
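
A quick illustration of how the new ROLE_DESCRIPTION_PROMPT renders once formatted (role names, task, and descriptions are invented for the example):

from camel.prompts.ai_society import AISocietyPromptTemplateDict

template = AISocietyPromptTemplateDict.ROLE_DESCRIPTION_PROMPT
rendered = template.format(
    user_role="Stock Trader",
    assistant_role="Python Programmer",
    task="Develop a trading bot for the stock market",
    user_description="Knows the market and sets the requirements.",
    assistant_description="Writes and tests the trading bot code.",
)
print(rendered)  # prints the "===== ROLES WITH DESCRIPTION =====" block
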
26 changes: 22 additions & 4 deletions camel/prompts/prompt_templates.py
@@ -34,13 +34,20 @@ def __init__(
self.task_prompt_template_dict = (task_prompt_template_dict
or TaskPromptTemplateDict())

def get_prompt_from_key(self, task_type: TaskType, key: Any) -> TextPrompt:
def get_prompt_from_key(
self,
task_type: TaskType,
key: Any,
with_role_description: bool = False,
) -> TextPrompt:
r"""Generates a text prompt using the specified :obj:`task_type` and
:obj:`key`.

Args:
task_type (TaskType): The type of task.
key (Any): The key used to generate the prompt.
with_role_description (bool, optional): Whether to include the
role description in the generated prompt. Defaults to False.

Returns:
TextPrompt: The generated text prompt.
@@ -50,7 +57,14 @@ def get_prompt_from_key(self, task_type: TaskType, key: Any) -> TextPrompt:
:obj:`task_type` and :obj:`key`.
"""
try:
return self.task_prompt_template_dict[task_type][key]
if (with_role_description):
role_description = self.task_prompt_template_dict[task_type][
"role_description"]
task_prompt_template = self.task_prompt_template_dict[
task_type][key]
return TextPrompt(role_description + task_prompt_template)
else:
return self.task_prompt_template_dict[task_type][key]

except KeyError:
raise KeyError("Failed to get generate prompt template for "
@@ -60,6 +74,7 @@ def get_system_prompt(
self,
task_type: TaskType,
role_type: RoleType,
with_role_description: bool = False,
) -> TextPrompt:
r"""Generates a text prompt for the system role, using the specified
:obj:`task_type` and :obj:`role_type`.
@@ -68,6 +83,8 @@
task_type (TaskType): The type of task.
role_type (RoleType): The type of role, either "USER" or
"ASSISTANT".
with_role_description (bool, optional): Whether to include the
role description in the generated prompt. Defaults to False.

Returns:
TextPrompt: The generated text prompt.
@@ -77,13 +94,14 @@
:obj:`task_type` and :obj:`role_type`.
"""
try:
return self.get_prompt_from_key(task_type, role_type)
return self.get_prompt_from_key(task_type, role_type,
with_role_description)

except KeyError:
prompt = "You are a helpful assistant."

warnings.warn("Failed to get system prompt template for "
f"task: {task_type.value}, role: {role_type.value}. "
f"task: {task_type.value}, role: {role_type}. "
f"Set template to: {prompt}")

return TextPrompt(prompt)
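
A short sketch of the extended lookup (follows directly from the signatures above; TaskType is assumed to be importable from camel.typing):

from camel.prompts.prompt_templates import PromptTemplateGenerator
from camel.typing import RoleType, TaskType

generator = PromptTemplateGenerator()
# Without the flag this returns the plain ASSISTANT_PROMPT; with it, the
# "role_description" template of the task type is prepended to the same prompt.
assistant_prompt = generator.get_system_prompt(
    TaskType.AI_SOCIETY,
    RoleType.ASSISTANT,
    with_role_description=True,
)
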
57 changes: 46 additions & 11 deletions camel/societies/role_playing.py
@@ -121,12 +121,32 @@ def __init__(
self.init_planned_task_prompt(task_planner_agent_kwargs,
output_language)

if (assistant_agent_kwargs is not None
and "role_description" in assistant_agent_kwargs
and user_agent_kwargs is not None
and "role_description" in user_agent_kwargs):
with_role_description = True
else:
with_role_description = False

sys_msg_generator = SystemMessageGenerator(
task_type=self.task_type, **(sys_msg_generator_kwargs or {}))
task_type=self.task_type,
with_role_description=with_role_description,
**(sys_msg_generator_kwargs or {}))

assistant_description = (None if assistant_agent_kwargs is None else
assistant_agent_kwargs.get(
"role_description", None))
user_description = (None if user_agent_kwargs is None else
user_agent_kwargs.get("role_description", None))
(init_assistant_sys_msg, init_user_sys_msg,
sys_msg_meta_dicts) = self.get_sys_message_info(
assistant_role_name, user_role_name, sys_msg_generator,
extend_sys_msg_meta_dicts)
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
assistant_description=assistant_description,
user_description=user_description,
sys_msg_generator=sys_msg_generator,
extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts)

self.assistant_agent: ChatAgent
self.user_agent: ChatAgent
@@ -139,7 +159,6 @@ def __init__(
user_agent_kwargs,
output_language,
)

self.critic: Optional[Union[CriticAgent, Human]] = None
self.critic_sys_msg: Optional[BaseMessage] = None
self.init_critic(critic_role_name, critic_criteria, critic_kwargs,
@@ -219,9 +238,13 @@ def init_planned_task_prompt(self,
self.planned_task_prompt = None

def get_sys_message_info(
self, assistant_role_name: str, user_role_name: str,
self,
assistant_role_name: str,
user_role_name: str,
sys_msg_generator: SystemMessageGenerator,
extend_sys_msg_meta_dicts: Optional[List[Dict]]
assistant_description: Optional[str] = None,
user_description: Optional[str] = None,
extend_sys_msg_meta_dicts: Optional[List[Dict]] = None,
) -> Tuple[BaseMessage, BaseMessage, List[Dict]]:
r"""Get initial assistant and user system message with a list of
system message meta dicts.
@@ -232,6 +255,9 @@ def get_sys_message_info(
user_role_name (str): The name of the role played by the user.
sys_msg_generator (SystemMessageGenerator): A system message
generator for agents.
assistant_description (str, optional): The description of the
assistant.
user_description (str, optional): The description of the user.
extend_sys_msg_meta_dicts (List[Dict], optional): A list of dicts
to extend the system message meta dicts with.

@@ -243,10 +269,20 @@ def get_sys_message_info(
sys_msg_meta_dicts = [dict(task=self.task_prompt) for _ in range(2)]
if (extend_sys_msg_meta_dicts is None and self.task_type
in [TaskType.AI_SOCIETY, TaskType.MISALIGNMENT]):
extend_sys_msg_meta_dicts = [
dict(assistant_role=assistant_role_name,
user_role=user_role_name) for _ in range(2)
]
if (assistant_description is not None
and user_description is not None):
extend_sys_msg_meta_dicts = [
dict(assistant_role=assistant_role_name,
user_role=user_role_name,
assistant_description=assistant_description,
user_description=user_description) for _ in range(2)
]
else:
extend_sys_msg_meta_dicts = [
dict(assistant_role=assistant_role_name,
user_role=user_role_name) for _ in range(2)
]

if extend_sys_msg_meta_dicts is not None:
sys_msg_meta_dicts = [{
**sys_msg_meta_dict,
@@ -444,7 +480,6 @@ def step(
whether the user agent terminated the conversation, and any
additional user information.
"""

user_response = self.user_agent.step(assistant_msg)
if user_response.terminated or user_response.msgs is None:
return (ChatAgentResponse([], False, {}),
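
How the role descriptions could reach the role-playing session (a sketch; the RolePlaying class name and the remaining constructor arguments are assumed, and the descriptions would typically come from RoleAssignmentAgent above):

from camel.societies.role_playing import RolePlaying

session = RolePlaying(
    assistant_role_name="Python Programmer",
    user_role_name="Stock Trader",
    task_prompt="Develop a trading bot for the stock market",
    assistant_agent_kwargs={"role_description": "Writes and tests the trading bot code."},
    user_agent_kwargs={"role_description": "Knows the market and sets the requirements."},
)
# Because both kwargs carry "role_description", with_role_description becomes True
# and the generated system messages include the ROLES WITH DESCRIPTION block.
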