From b86eb2b4df2e552d146955f2abc68ea4dce4637c Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Sun, 13 Aug 2023 21:15:52 +0200 Subject: [PATCH 01/33] Generate the roles and role description --- camel/agents/chat_agent.py | 6 +- camel/agents/role_assignment.py | 125 ++++++++++++++++++ camel/functions/openai_function.py | 2 +- camel/generators.py | 7 +- camel/prompts/ai_society.py | 17 ++- camel/prompts/prompt_templates.py | 28 +++- camel/societies/role_playing.py | 59 +++++++-- .../role_playing_with_role_generation.py | 103 +++++++++++++++ examples/test/test_ai_society_example.py | 9 +- test/agents/test_role_playing.py | 2 +- 10 files changed, 331 insertions(+), 27 deletions(-) create mode 100644 camel/agents/role_assignment.py create mode 100644 examples/ai_society/role_playing_with_role_generation.py diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 7b0226eff..49ea4c991 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -122,6 +122,8 @@ class ChatAgent(BaseAgent): agent. (default: :obj:`None`) function_list (Optional[List[OpenAIFunction]]): List of available :obj:`OpenAIFunction`. (default: :obj:`None`) + role_description (str, optional): Description of the role + :obj:`str`. (default: :obj:`None`) """ def __init__( @@ -132,12 +134,14 @@ def __init__( message_window_size: Optional[int] = None, output_language: Optional[str] = None, function_list: Optional[List[OpenAIFunction]] = None, + role_description: Optional[str] = None, ) -> None: self.orig_sys_message: BaseMessage = system_message self.system_message = system_message self.role_name: str = system_message.role_name self.role_type: RoleType = system_message.role_type + self.role_description: Optional[str] = role_description self.output_language: Optional[str] = output_language if self.output_language is not None: self.set_output_language(self.output_language) @@ -573,4 +577,4 @@ def __repr__(self) -> str: Returns: str: The string representation of the :obj:`ChatAgent`. """ - return f"ChatAgent({self.role_name}, {self.role_type}, {self.model})" + return f"ChatAgent({self.role_name}, {self.role_type}, {self.model})" \ No newline at end of file diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment.py new file mode 100644 index 000000000..0c2681865 --- /dev/null +++ b/camel/agents/role_assignment.py @@ -0,0 +1,125 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +import re +from typing import Any, Dict, List, Optional, Tuple, Union + +from tenacity import retry, stop_after_attempt, wait_exponential + +from camel.agents import ChatAgent +from camel.messages import BaseMessage +from camel.prompts import TextPrompt +from camel.typing import ModelType, RoleType + + +class RoleAssignmentAgent(ChatAgent): + r""" + An agent that generates role names based on the task prompt. 
+ Attributes: + role_assignment_prompt (TextPrompt): A prompt for the agent to generate + role names. + args: + model (ModelType): The tupe of model to use for the agent. + (default: :obj: 'ModelType.GPT_3_5_TURBO') + model_config (Any): The configuration for the model. + (default: :obj: 'None') + """ + + def __init__( + self, + model: ModelType = ModelType.GPT_3_5_TURBO, + model_config: Optional[Any] = None, + ) -> None: + self.role_assignment_prompt = TextPrompt( + 'Given this task, "{task}", generate two role names, ' + + 'one for the AI user and one for the AI assistant.') + + system_message = BaseMessage( + role_name="Role Assigner", + role_type=RoleType.ASSISTANT, + meta_dict=None, + content="You assign roles based on tasks.", + ) + super().__init__(system_message, model, model_config) + + @retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5)) + def run_role_with_description( + self, + num_roles: Optional[int] = 2, + task_prompt: Union[str, TextPrompt] = "", + ) -> Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]: + r""" " + Generate role names based on the input task prompt. + + Args: + num_roles (int): The number of roles to generate. + (default: :obj:`2`) + task_prompt (Union[str, TextPrompt]): The prompt + for the task based on which the roles are to be generated. + + Returns: + Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]: A tuple + """ + self.reset() + + expert_prompt = "\n".join( + f"Domain expert {i + 1}: <|blank|>\n" + f"Associated competencies, professional characteristics, duties " + f"and workflows: <|blank|>. End.\n" for i in range(num_roles or 0)) + role_assignment_generation_prompt = TextPrompt( + "You are the boss, you need to recruit experts in {num_roles} " + + "different fields to solve the task.\n" + + "Please tell me which domain experts should be recruited, " + + "and what competencies, professional characteristics, duties " + + "and workflows to complete the task.\n" + + "ONLY return the content in BLANK.\n\n" + "===== TASK =====\n" + + "{task}\n\n" + "===== PROMPT =====\n" + expert_prompt) + role_assignment_generation = role_assignment_generation_prompt.format( + num_roles=num_roles, task=task_prompt) + + role_assignment_generation_msg = BaseMessage.make_user_message( + role_name="Role Assigner", content=role_assignment_generation) + + response_completion = super().step( + input_message=role_assignment_generation_msg) + + output_completion = response_completion.msg # type: BaseMessage + terminated = response_completion.terminated + info = response_completion.info + + # Distribute the output completions into role names and descriptions + role_names = [ + desc.replace("<|", "").replace("|>", "") for desc in re.findall( + r"Domain expert \d: (.+?)\nAssociated competencies,", + output_completion.content, + re.DOTALL, + ) + ] + role_descriptions = [ + desc.replace("<|", "").replace("|>", "") for desc in re.findall( + r"Associated competencies, professional characteristics, " + r"duties and workflows: (.+?) End.", output_completion.content, + re.DOTALL) + ] + + if len(role_names) != num_roles or len(role_descriptions) != num_roles: + raise RuntimeError("Got None or insufficient Role messages. 
") + if terminated: + raise RuntimeError("Role assignment failed.") + + role_descriptions_dict = { + role_name: description + for role_name, description in zip(role_names, role_descriptions) + } + + return role_names, role_descriptions_dict, terminated, info \ No newline at end of file diff --git a/camel/functions/openai_function.py b/camel/functions/openai_function.py index d8ac8f608..1cdbf9a28 100644 --- a/camel/functions/openai_function.py +++ b/camel/functions/openai_function.py @@ -85,4 +85,4 @@ def as_dict(self) -> Dict[str, Any]: attr: getattr(self, attr) for attr in ["name", "description", "parameters"] if getattr(self, attr) is not None - } + } \ No newline at end of file diff --git a/camel/generators.py b/camel/generators.py index 2f94375b0..ea2b516f4 100644 --- a/camel/generators.py +++ b/camel/generators.py @@ -29,6 +29,8 @@ class SystemMessageGenerator: sys_msg_meta_dict_keys (Optional[Set[str]], optional): The set of keys of the meta dictionary used to fill the prompts. (default: :obj:`None`) + with_role_description (bool, optional): Whether to include the role + description in the system message. (default: :obj:`False`) """ def __init__( @@ -36,6 +38,7 @@ def __init__( task_type: TaskType = TaskType.AI_SOCIETY, sys_prompts: Optional[Dict[RoleType, str]] = None, sys_msg_meta_dict_keys: Optional[Set[str]] = None, + with_role_description: bool = False, ) -> None: self.sys_prompts: Dict[RoleType, str] @@ -47,10 +50,12 @@ def __init__( ).get_system_prompt( task_type, RoleType.ASSISTANT, + with_role_description=with_role_description, ) user_prompt_template = PromptTemplateGenerator().get_system_prompt( task_type, RoleType.USER, + with_role_description=with_role_description, ) critic_prompt_template = PromptTemplateGenerator( ).get_system_prompt( @@ -260,4 +265,4 @@ def from_role_files( def from_role_generator( self, role_generator: Generator[Tuple, None, None] ) -> Generator[str, None, None]: - raise NotImplementedError + raise NotImplementedError \ No newline at end of file diff --git a/camel/prompts/ai_society.py b/camel/prompts/ai_society.py index d9e1d3137..7edddedb4 100644 --- a/camel/prompts/ai_society.py +++ b/camel/prompts/ai_society.py @@ -58,8 +58,14 @@ class AISocietyPromptTemplateDict(TextPromptDict): Please reply with the specified task in {word_limit} words or less. Do not add anything else.""" ) - ASSISTANT_PROMPT: TextPrompt = TextPrompt( - """Never forget you are a {assistant_role} and I am a {user_role}. Never flip roles! Never instruct me! + ROLE_DESCRIPTION_PROMPT = TextPrompt("""===== ROLES WITH DESCRIPTION ===== +{user_role} and {assistant_role} are collaborating to complete a task: {task} +{user_role}'s competencies, professional characteristics, duties and workflows to complete the task: {user_description} +{assistant_role}'s competencies, professional characteristics, duties and workflows to complete the task: {assistant_description} +""") + + ASSISTANT_PROMPT: TextPrompt = TextPrompt("""===== RULES OF ASSISTANT ===== +Never forget you are a {assistant_role} and I am a {user_role}. Never flip roles! Never instruct me! We share a common interest in collaborating to successfully complete a task. You must help me to complete the task. Here is the task: {task}. Never forget our task! @@ -75,8 +81,8 @@ class AISocietyPromptTemplateDict(TextPromptDict): should be very specific, include detailed explanations and provide preferable detailed implementations and examples and lists for task-solving. 
Always end with: Next request.""") - USER_PROMPT: TextPrompt = TextPrompt( - """Never forget you are a {user_role} and I am a {assistant_role}. Never flip roles! You will always instruct me. + USER_PROMPT: TextPrompt = TextPrompt("""===== RULES OF USER ===== +Never forget you are a {user_role} and I am a {assistant_role}. Never flip roles! You will always instruct me. We share a common interest in collaborating to successfully complete a task. I must help you to complete the task. Here is the task: {task}. Never forget our task! @@ -115,7 +121,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: "generate_users": self.GENERATE_USERS, "generate_tasks": self.GENERATE_TASKS, "task_specify_prompt": self.TASK_SPECIFY_PROMPT, + "role_description": self.ROLE_DESCRIPTION_PROMPT, RoleType.ASSISTANT: self.ASSISTANT_PROMPT, RoleType.USER: self.USER_PROMPT, RoleType.CRITIC: self.CRITIC_PROMPT, - }) + }) \ No newline at end of file diff --git a/camel/prompts/prompt_templates.py b/camel/prompts/prompt_templates.py index 005fb94e8..93f747917 100644 --- a/camel/prompts/prompt_templates.py +++ b/camel/prompts/prompt_templates.py @@ -34,13 +34,20 @@ def __init__( self.task_prompt_template_dict = (task_prompt_template_dict or TaskPromptTemplateDict()) - def get_prompt_from_key(self, task_type: TaskType, key: Any) -> TextPrompt: + def get_prompt_from_key( + self, + task_type: TaskType, + key: Any, + with_role_description: bool = False, + ) -> TextPrompt: r"""Generates a text prompt using the specified :obj:`task_type` and :obj:`key`. Args: task_type (TaskType): The type of task. key (Any): The key used to generate the prompt. + with_role_description (bool, optional): Whether to include the + role description in the generated prompt. Defaults to False. Returns: TextPrompt: The generated text prompt. @@ -50,7 +57,14 @@ def get_prompt_from_key(self, task_type: TaskType, key: Any) -> TextPrompt: :obj:`task_type` and :obj:`key`. """ try: - return self.task_prompt_template_dict[task_type][key] + if (with_role_description): + role_description = self.task_prompt_template_dict[task_type][ + "role_description"] + task_prompt_template = self.task_prompt_template_dict[ + task_type][key] + return TextPrompt(role_description + task_prompt_template) + else: + return self.task_prompt_template_dict[task_type][key] except KeyError: raise KeyError("Failed to get generate prompt template for " @@ -60,6 +74,7 @@ def get_system_prompt( self, task_type: TaskType, role_type: RoleType, + with_role_description: bool = False, ) -> TextPrompt: r"""Generates a text prompt for the system role, using the specified :obj:`task_type` and :obj:`role_type`. @@ -68,6 +83,8 @@ def get_system_prompt( task_type (TaskType): The type of task. role_type (RoleType): The type of role, either "USER" or "ASSISTANT". + with_role_description (bool, optional): Whether to include the + role description in the generated prompt. Defaults to False. Returns: TextPrompt: The generated text prompt. @@ -77,13 +94,14 @@ def get_system_prompt( :obj:`task_type` and :obj:`role_type`. """ try: - return self.get_prompt_from_key(task_type, role_type) + return self.get_prompt_from_key(task_type, role_type, + with_role_description) except KeyError: prompt = "You are a helpful assistant." warnings.warn("Failed to get system prompt template for " - f"task: {task_type.value}, role: {role_type.value}. " + f"task: {task_type.value}, role: {role_type}. 
" f"Set template to: {prompt}") return TextPrompt(prompt) @@ -114,4 +132,4 @@ def get_task_specify_prompt( Returns: TextPrompt: The generated prompt for specifying a task. """ - return self.get_prompt_from_key(task_type, "task_specify_prompt") + return self.get_prompt_from_key(task_type, "task_specify_prompt") \ No newline at end of file diff --git a/camel/societies/role_playing.py b/camel/societies/role_playing.py index 7e0044354..e9b85dc8f 100644 --- a/camel/societies/role_playing.py +++ b/camel/societies/role_playing.py @@ -121,12 +121,32 @@ def __init__( self.init_planned_task_prompt(task_planner_agent_kwargs, output_language) + if (assistant_agent_kwargs is not None + and "role_description" in assistant_agent_kwargs + and user_agent_kwargs is not None + and "role_description" in user_agent_kwargs): + with_role_description = True + else: + with_role_description = False + sys_msg_generator = SystemMessageGenerator( - task_type=self.task_type, **(sys_msg_generator_kwargs or {})) + task_type=self.task_type, + with_role_description=with_role_description, + **(sys_msg_generator_kwargs or {})) + + assistant_description = (None if assistant_agent_kwargs is None else + assistant_agent_kwargs.get( + "role_description", None)) + user_description = (None if user_agent_kwargs is None else + user_agent_kwargs.get("role_description", None)) (init_assistant_sys_msg, init_user_sys_msg, sys_msg_meta_dicts) = self.get_sys_message_info( - assistant_role_name, user_role_name, sys_msg_generator, - extend_sys_msg_meta_dicts) + assistant_role_name=assistant_role_name, + user_role_name=user_role_name, + assistant_description=assistant_description, + user_description=user_description, + sys_msg_generator=sys_msg_generator, + extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts) self.assistant_agent: ChatAgent self.user_agent: ChatAgent @@ -139,7 +159,6 @@ def __init__( user_agent_kwargs, output_language, ) - self.critic: Optional[Union[CriticAgent, Human]] = None self.critic_sys_msg: Optional[BaseMessage] = None self.init_critic(critic_role_name, critic_criteria, critic_kwargs, @@ -219,9 +238,13 @@ def init_planned_task_prompt(self, self.planned_task_prompt = None def get_sys_message_info( - self, assistant_role_name: str, user_role_name: str, + self, + assistant_role_name: str, + user_role_name: str, sys_msg_generator: SystemMessageGenerator, - extend_sys_msg_meta_dicts: Optional[List[Dict]] + assistant_description: Optional[str] = None, + user_description: Optional[str] = None, + extend_sys_msg_meta_dicts: Optional[List[Dict]] = None, ) -> Tuple[BaseMessage, BaseMessage, List[Dict]]: r"""Get initial assistant and user system message with a list of system message meta dicts. @@ -232,6 +255,9 @@ def get_sys_message_info( user_role_name (str): The name of the role played by the user. sys_msg_generator (SystemMessageGenerator): A system message generator for agents. + assistant_description (str, optional): The description of the + assistant. + user_description (str, optional): The description of the user. extend_sys_msg_meta_dicts (List[Dict], optional): A list of dicts to extend the system message meta dicts with. 
@@ -243,10 +269,20 @@ def get_sys_message_info( sys_msg_meta_dicts = [dict(task=self.task_prompt) for _ in range(2)] if (extend_sys_msg_meta_dicts is None and self.task_type in [TaskType.AI_SOCIETY, TaskType.MISALIGNMENT]): - extend_sys_msg_meta_dicts = [ - dict(assistant_role=assistant_role_name, - user_role=user_role_name) for _ in range(2) - ] + if (assistant_description is not None + and user_description is not None): + extend_sys_msg_meta_dicts = [ + dict(assistant_role=assistant_role_name, + user_role=user_role_name, + assistant_description=assistant_description, + user_description=user_description) for _ in range(2) + ] + else: + extend_sys_msg_meta_dicts = [ + dict(assistant_role=assistant_role_name, + user_role=user_role_name) for _ in range(2) + ] + if extend_sys_msg_meta_dicts is not None: sys_msg_meta_dicts = [{ **sys_msg_meta_dict, @@ -444,7 +480,6 @@ def step( whether the user agent terminated the conversation, and any additional user information. """ - user_response = self.user_agent.step(assistant_msg) if user_response.terminated or user_response.msgs is None: return (ChatAgentResponse([], False, {}), @@ -466,4 +501,4 @@ def step( assistant_response.info), ChatAgentResponse([user_msg], user_response.terminated, user_response.info), - ) + ) \ No newline at end of file diff --git a/examples/ai_society/role_playing_with_role_generation.py b/examples/ai_society/role_playing_with_role_generation.py new file mode 100644 index 000000000..bfd8e36ea --- /dev/null +++ b/examples/ai_society/role_playing_with_role_generation.py @@ -0,0 +1,103 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +from colorama import Fore + +from camel.agents.role_assignment import RoleAssignmentAgent +from camel.configs import ChatGPTConfig +from camel.societies import RolePlaying +from camel.utils import print_text_animated + +AI_ASSISTANT_ROLE_INDEX = 0 +AI_USER_ROLE_INDEX = 1 + + +def main(model_type=None) -> None: + task_prompt = "Develop a trading bot for the stock market." 
+ + model_config_description = ChatGPTConfig() + role_description_agent = RoleAssignmentAgent( + model=model_type, model_config=model_config_description) + + role_names, role_description_dict, _, _ = ( + role_description_agent.run_role_with_description( + num_roles=2, task_prompt=task_prompt)) + + ai_assistant_role = role_names[AI_ASSISTANT_ROLE_INDEX] + ai_user_role = role_names[AI_USER_ROLE_INDEX] + + role_play_session = RolePlaying( + task_prompt=task_prompt, + with_task_specify=True, + assistant_role_name=ai_assistant_role, + user_role_name=ai_user_role, + assistant_agent_kwargs=dict( + model=model_type, + role_description=role_description_dict[ai_assistant_role]), + user_agent_kwargs=dict( + model=model_type, + role_description=role_description_dict[ai_user_role]), + task_specify_agent_kwargs=dict(model=model_type), + ) + + print( + Fore.GREEN + + f"AI Assistant sys message:\n{role_play_session.assistant_sys_msg}\n") + print(Fore.BLUE + + f"AI User sys message:\n{role_play_session.user_sys_msg}\n") + print(Fore.GREEN + f"AI Assistant role description:\n" + f"{role_play_session.assistant_sys_msg.role_name}\n" + f"{role_description_dict[ai_assistant_role]}\n") + print(Fore.BLUE + f"AI User role description:\n" + f"{role_play_session.user_sys_msg.role_name}\n" + f"{role_description_dict[ai_user_role]}\n") + + print(Fore.YELLOW + f"Original task prompt:\n{task_prompt}\n") + print( + Fore.CYAN + + f"Specified task prompt:\n{role_play_session.specified_task_prompt}\n") + print(Fore.RED + f"Final task prompt:\n{role_play_session.task_prompt}\n") + + chat_turn_limit, n = 50, 0 + input_assistant_msg, _ = role_play_session.init_chat() + while n < chat_turn_limit: + n += 1 + assistant_response, user_response = role_play_session.step( + input_assistant_msg) + + if assistant_response.terminated: + print(Fore.GREEN + ( + "AI Assistant terminated. " + f"Reason: {assistant_response.info['termination_reasons']}.")) + break + if user_response.terminated: + print(Fore.GREEN + + ("AI User terminated. 
" + f"Reason: {user_response.info['termination_reasons']}.")) + break + + print_text_animated( + Fore.BLUE + + f"AI User: {ai_user_role}\n\n{user_response.msg.content}\n") + print_text_animated(Fore.GREEN + + f"AI Assistant:{ai_assistant_role}\n\n" + + f"{assistant_response.msg.content}\n") + + if "CAMEL_TASK_DONE" in user_response.msg.content: + break + + input_assistant_msg = assistant_response.msg + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/examples/test/test_ai_society_example.py b/examples/test/test_ai_society_example.py index 89410bcb2..acc9f7330 100644 --- a/examples/test/test_ai_society_example.py +++ b/examples/test/test_ai_society_example.py @@ -14,6 +14,7 @@ from mock import patch import examples.ai_society.role_playing +import examples.ai_society.role_playing_with_role_generation import examples.function_call.role_playing_with_function from camel.typing import ModelType @@ -23,6 +24,12 @@ def test_ai_society_role_playing_example(): examples.ai_society.role_playing.main(ModelType.STUB) +def test_role_playing_with_role_generation_example(): + with patch('time.sleep', return_value=None): + examples.ai_society.role_playing_with_role_generation.main( + ModelType.GPT_3_5_TURBO) + + def test_role_playing_with_function_example(): with patch('time.sleep', return_value=None): - examples.function_call.role_playing_with_function.main(ModelType.STUB) + examples.function_call.role_playing_with_function.main(ModelType.STUB) \ No newline at end of file diff --git a/test/agents/test_role_playing.py b/test/agents/test_role_playing.py index dca115dc5..43a88e028 100644 --- a/test/agents/test_role_playing.py +++ b/test/agents/test_role_playing.py @@ -114,4 +114,4 @@ def test_role_playing_step(task_type, extend_sys_msg_meta_dicts, assert isinstance(response.msgs[0], BaseMessage) assert isinstance(response.terminated, bool) assert response.terminated is False - assert isinstance(response.info, dict) + assert isinstance(response.info, dict) \ No newline at end of file From 24b2f0bcbca8c4a1eb1170b9576643ab683e04de Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Sun, 13 Aug 2023 21:28:10 +0200 Subject: [PATCH 02/33] Add newline at end of file --- camel/agents/chat_agent.py | 2 +- camel/agents/role_assignment.py | 2 +- camel/functions/openai_function.py | 2 +- camel/generators.py | 2 +- camel/prompts/ai_society.py | 2 +- camel/prompts/prompt_templates.py | 2 +- camel/societies/role_playing.py | 2 +- examples/ai_society/role_playing_with_role_generation.py | 2 +- examples/test/test_ai_society_example.py | 2 +- test/agents/test_role_playing.py | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 49ea4c991..7d47ea7f0 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -577,4 +577,4 @@ def __repr__(self) -> str: Returns: str: The string representation of the :obj:`ChatAgent`. 
""" - return f"ChatAgent({self.role_name}, {self.role_type}, {self.model})" \ No newline at end of file + return f"ChatAgent({self.role_name}, {self.role_type}, {self.model})" diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment.py index 0c2681865..e54837f93 100644 --- a/camel/agents/role_assignment.py +++ b/camel/agents/role_assignment.py @@ -122,4 +122,4 @@ def run_role_with_description( for role_name, description in zip(role_names, role_descriptions) } - return role_names, role_descriptions_dict, terminated, info \ No newline at end of file + return role_names, role_descriptions_dict, terminated, info diff --git a/camel/functions/openai_function.py b/camel/functions/openai_function.py index 1cdbf9a28..d8ac8f608 100644 --- a/camel/functions/openai_function.py +++ b/camel/functions/openai_function.py @@ -85,4 +85,4 @@ def as_dict(self) -> Dict[str, Any]: attr: getattr(self, attr) for attr in ["name", "description", "parameters"] if getattr(self, attr) is not None - } \ No newline at end of file + } diff --git a/camel/generators.py b/camel/generators.py index ea2b516f4..ac17a89fd 100644 --- a/camel/generators.py +++ b/camel/generators.py @@ -265,4 +265,4 @@ def from_role_files( def from_role_generator( self, role_generator: Generator[Tuple, None, None] ) -> Generator[str, None, None]: - raise NotImplementedError \ No newline at end of file + raise NotImplementedError diff --git a/camel/prompts/ai_society.py b/camel/prompts/ai_society.py index 7edddedb4..59f84d1ad 100644 --- a/camel/prompts/ai_society.py +++ b/camel/prompts/ai_society.py @@ -125,4 +125,4 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: RoleType.ASSISTANT: self.ASSISTANT_PROMPT, RoleType.USER: self.USER_PROMPT, RoleType.CRITIC: self.CRITIC_PROMPT, - }) \ No newline at end of file + }) diff --git a/camel/prompts/prompt_templates.py b/camel/prompts/prompt_templates.py index 93f747917..885bbe050 100644 --- a/camel/prompts/prompt_templates.py +++ b/camel/prompts/prompt_templates.py @@ -132,4 +132,4 @@ def get_task_specify_prompt( Returns: TextPrompt: The generated prompt for specifying a task. 
""" - return self.get_prompt_from_key(task_type, "task_specify_prompt") \ No newline at end of file + return self.get_prompt_from_key(task_type, "task_specify_prompt") diff --git a/camel/societies/role_playing.py b/camel/societies/role_playing.py index e9b85dc8f..2b71505cd 100644 --- a/camel/societies/role_playing.py +++ b/camel/societies/role_playing.py @@ -501,4 +501,4 @@ def step( assistant_response.info), ChatAgentResponse([user_msg], user_response.terminated, user_response.info), - ) \ No newline at end of file + ) diff --git a/examples/ai_society/role_playing_with_role_generation.py b/examples/ai_society/role_playing_with_role_generation.py index bfd8e36ea..e1f7d5931 100644 --- a/examples/ai_society/role_playing_with_role_generation.py +++ b/examples/ai_society/role_playing_with_role_generation.py @@ -100,4 +100,4 @@ def main(model_type=None) -> None: if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/test/test_ai_society_example.py b/examples/test/test_ai_society_example.py index acc9f7330..7b5432bc0 100644 --- a/examples/test/test_ai_society_example.py +++ b/examples/test/test_ai_society_example.py @@ -32,4 +32,4 @@ def test_role_playing_with_role_generation_example(): def test_role_playing_with_function_example(): with patch('time.sleep', return_value=None): - examples.function_call.role_playing_with_function.main(ModelType.STUB) \ No newline at end of file + examples.function_call.role_playing_with_function.main(ModelType.STUB) diff --git a/test/agents/test_role_playing.py b/test/agents/test_role_playing.py index 43a88e028..dca115dc5 100644 --- a/test/agents/test_role_playing.py +++ b/test/agents/test_role_playing.py @@ -114,4 +114,4 @@ def test_role_playing_step(task_type, extend_sys_msg_meta_dicts, assert isinstance(response.msgs[0], BaseMessage) assert isinstance(response.terminated, bool) assert response.terminated is False - assert isinstance(response.info, dict) \ No newline at end of file + assert isinstance(response.info, dict) From c1562d3ee5801d7d94dcb0825644a76513f4e390 Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Tue, 15 Aug 2023 17:13:14 +0200 Subject: [PATCH 03/33] Add a test for the role_assignment AI agent --- examples/ai_society/role_assignment.py | 43 ++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 examples/ai_society/role_assignment.py diff --git a/examples/ai_society/role_assignment.py b/examples/ai_society/role_assignment.py new file mode 100644 index 000000000..7c2a1023e --- /dev/null +++ b/examples/ai_society/role_assignment.py @@ -0,0 +1,43 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== +from colorama import Fore + +from camel.agents.role_assignment import RoleAssignmentAgent +from camel.configs import ChatGPTConfig + + +def main(model_type=None, num_roles=3) -> None: + task_prompt = "Develop a trading bot for the stock market." + + model_config_description = ChatGPTConfig() + role_description_agent = RoleAssignmentAgent( + model=model_type, model_config=model_config_description) + + role_names, role_description_dict, _, _ = ( + role_description_agent.run_role_with_description( + num_roles=num_roles, task_prompt=task_prompt)) + + if (len(role_names) != num_roles): + raise ValueError(f"Length of role_names ({len(role_names)}) " + f"does not equal to num_roles ({num_roles}).") + + print(Fore.YELLOW + f"Original task prompt:\n{task_prompt}\n") + print(Fore.GREEN + f"List of {num_roles} roles with description:") + for role_name in role_names: + print(Fore.BLUE + f"{role_name}:\n" + f"{role_description_dict[role_name]}\n") + + +if __name__ == "__main__": + main() From 5f79d384e0949e1db21d1865d1c07d042f9d59d7 Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Tue, 15 Aug 2023 20:58:32 +0200 Subject: [PATCH 04/33] Update according to the comments --- camel/prompts/prompt_templates.py | 2 +- camel/societies/role_playing.py | 15 ++++++++------- examples/test/test_ai_society_example.py | 6 ++++++ 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/camel/prompts/prompt_templates.py b/camel/prompts/prompt_templates.py index 885bbe050..b471253da 100644 --- a/camel/prompts/prompt_templates.py +++ b/camel/prompts/prompt_templates.py @@ -57,7 +57,7 @@ def get_prompt_from_key( :obj:`task_type` and :obj:`key`. """ try: - if (with_role_description): + if with_role_description: role_description = self.task_prompt_template_dict[task_type][ "role_description"] task_prompt_template = self.task_prompt_template_dict[ diff --git a/camel/societies/role_playing.py b/camel/societies/role_playing.py index 2b71505cd..16298f644 100644 --- a/camel/societies/role_playing.py +++ b/camel/societies/role_playing.py @@ -141,12 +141,9 @@ def __init__( user_agent_kwargs.get("role_description", None)) (init_assistant_sys_msg, init_user_sys_msg, sys_msg_meta_dicts) = self.get_sys_message_info( - assistant_role_name=assistant_role_name, - user_role_name=user_role_name, - assistant_description=assistant_description, - user_description=user_description, - sys_msg_generator=sys_msg_generator, - extend_sys_msg_meta_dicts=extend_sys_msg_meta_dicts) + assistant_role_name, user_role_name, sys_msg_generator, + assistant_description, user_description, + extend_sys_msg_meta_dicts) self.assistant_agent: ChatAgent self.user_agent: ChatAgent @@ -277,11 +274,15 @@ def get_sys_message_info( assistant_description=assistant_description, user_description=user_description) for _ in range(2) ] - else: + elif (assistant_description is None and user_description is None): extend_sys_msg_meta_dicts = [ dict(assistant_role=assistant_role_name, user_role=user_role_name) for _ in range(2) ] + else: + raise ValueError( + "Both assistant and user descriptions should " + "either be 'None' or both should have values.") if extend_sys_msg_meta_dicts is not None: sys_msg_meta_dicts = [{ diff --git a/examples/test/test_ai_society_example.py b/examples/test/test_ai_society_example.py index 7b5432bc0..4a7d884f0 100644 --- a/examples/test/test_ai_society_example.py +++ b/examples/test/test_ai_society_example.py @@ -13,6 +13,7 @@ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== from mock import patch +import examples.ai_society.role_assignment import examples.ai_society.role_playing import examples.ai_society.role_playing_with_role_generation import examples.function_call.role_playing_with_function @@ -24,6 +25,11 @@ def test_ai_society_role_playing_example(): examples.ai_society.role_playing.main(ModelType.STUB) +def test_ai_society_role_assignment_example(): + with patch('time.sleep', return_value=None): + examples.ai_society.role_assignment.main(ModelType.GPT_3_5_TURBO) + + def test_role_playing_with_role_generation_example(): with patch('time.sleep', return_value=None): examples.ai_society.role_playing_with_role_generation.main( From 022f6238194f625d27f2488668d7783e0e36f4de Mon Sep 17 00:00:00 2001 From: Appointat <65004114+Appointat@users.noreply.github.com> Date: Fri, 18 Aug 2023 11:21:14 +0200 Subject: [PATCH 05/33] Update camel/agents/role_assignment.py Co-authored-by: Guohao Li --- camel/agents/role_assignment.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment.py index e54837f93..ac32dc719 100644 --- a/camel/agents/role_assignment.py +++ b/camel/agents/role_assignment.py @@ -29,8 +29,8 @@ class RoleAssignmentAgent(ChatAgent): role_assignment_prompt (TextPrompt): A prompt for the agent to generate role names. args: - model (ModelType): The tupe of model to use for the agent. - (default: :obj: 'ModelType.GPT_3_5_TURBO') + model (ModelType, optional): The type of model to use for the agent. + (default: :obj:`ModelType.GPT_3_5_TURBO`) model_config (Any): The configuration for the model. (default: :obj: 'None') """ From 864f59ae75e3df783162134578cf14da145591fd Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Fri, 18 Aug 2023 11:21:57 +0200 Subject: [PATCH 06/33] Update role_assignment.py --- camel/agents/role_assignment.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment.py index e54837f93..8ba0a7b60 100644 --- a/camel/agents/role_assignment.py +++ b/camel/agents/role_assignment.py @@ -23,8 +23,7 @@ class RoleAssignmentAgent(ChatAgent): - r""" - An agent that generates role names based on the task prompt. + r"""An agent that generates role names based on the task prompt. Attributes: role_assignment_prompt (TextPrompt): A prompt for the agent to generate role names. @@ -58,8 +57,7 @@ def run_role_with_description( num_roles: Optional[int] = 2, task_prompt: Union[str, TextPrompt] = "", ) -> Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]: - r""" " - Generate role names based on the input task prompt. + r"""Generate role names based on the input task prompt. Args: num_roles (int): The number of roles to generate. From a0bf2c09197a0acc812dd61c6faa3478a10f00f2 Mon Sep 17 00:00:00 2001 From: Appointat <65004114+Appointat@users.noreply.github.com> Date: Fri, 18 Aug 2023 11:25:14 +0200 Subject: [PATCH 07/33] Update camel/agents/role_assignment.py Co-authored-by: Guohao Li --- camel/agents/role_assignment.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment.py index ab20053c8..6c516d6a6 100644 --- a/camel/agents/role_assignment.py +++ b/camel/agents/role_assignment.py @@ -27,7 +27,8 @@ class RoleAssignmentAgent(ChatAgent): Attributes: role_assignment_prompt (TextPrompt): A prompt for the agent to generate role names. - args: + + Args: model (ModelType, optional): The type of model to use for the agent. 
(default: :obj:`ModelType.GPT_3_5_TURBO`) model_config (Any): The configuration for the model. From b702c771f0f1540f4a445e3b5ba02edd56ea63d5 Mon Sep 17 00:00:00 2001 From: Appointat <65004114+Appointat@users.noreply.github.com> Date: Fri, 18 Aug 2023 11:25:26 +0200 Subject: [PATCH 08/33] Update camel/agents/role_assignment.py Co-authored-by: Guohao Li --- camel/agents/role_assignment.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment.py index 6c516d6a6..2c45571e8 100644 --- a/camel/agents/role_assignment.py +++ b/camel/agents/role_assignment.py @@ -31,8 +31,8 @@ class RoleAssignmentAgent(ChatAgent): Args: model (ModelType, optional): The type of model to use for the agent. (default: :obj:`ModelType.GPT_3_5_TURBO`) - model_config (Any): The configuration for the model. - (default: :obj: 'None') + model_config (Any, optional): The configuration for the model. + (default: :obj:`None`) """ def __init__( From 04bdc241ee7ffd40f0cda6297a502ca555613c33 Mon Sep 17 00:00:00 2001 From: Appointat <65004114+Appointat@users.noreply.github.com> Date: Fri, 18 Aug 2023 11:25:39 +0200 Subject: [PATCH 09/33] Update camel/agents/role_assignment.py Co-authored-by: Guohao Li --- camel/agents/role_assignment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment.py index 2c45571e8..47f27b950 100644 --- a/camel/agents/role_assignment.py +++ b/camel/agents/role_assignment.py @@ -61,7 +61,7 @@ def run_role_with_description( r"""Generate role names based on the input task prompt. Args: - num_roles (int): The number of roles to generate. + num_roles (int, optional): The number of roles to generate. (default: :obj:`2`) task_prompt (Union[str, TextPrompt]): The prompt for the task based on which the roles are to be generated. From 212a3f2b01bcc63df8bf4690a3944895df4269fa Mon Sep 17 00:00:00 2001 From: Appointat <65004114+Appointat@users.noreply.github.com> Date: Fri, 18 Aug 2023 11:26:08 +0200 Subject: [PATCH 10/33] Update camel/agents/role_assignment.py Co-authored-by: Guohao Li --- camel/agents/role_assignment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment.py index 47f27b950..1598b5079 100644 --- a/camel/agents/role_assignment.py +++ b/camel/agents/role_assignment.py @@ -55,7 +55,7 @@ def __init__( @retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5)) def run_role_with_description( self, - num_roles: Optional[int] = 2, + num_roles: int = 2, task_prompt: Union[str, TextPrompt] = "", ) -> Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]: r"""Generate role names based on the input task prompt. From 83999e7cf5672ad04f804f654a111cdd97ec9baa Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Fri, 18 Aug 2023 11:58:03 +0200 Subject: [PATCH 11/33] Update --- camel/agents/role_assignment.py | 8 ++++---- examples/ai_society/role_assignment.py | 2 +- examples/ai_society/role_playing_with_role_generation.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment.py index 1598b5079..13551b35a 100644 --- a/camel/agents/role_assignment.py +++ b/camel/agents/role_assignment.py @@ -27,7 +27,7 @@ class RoleAssignmentAgent(ChatAgent): Attributes: role_assignment_prompt (TextPrompt): A prompt for the agent to generate role names. 
- + Args: model (ModelType, optional): The type of model to use for the agent. (default: :obj:`ModelType.GPT_3_5_TURBO`) @@ -55,16 +55,16 @@ def __init__( @retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5)) def run_role_with_description( self, + task_prompt: Union[str, TextPrompt], num_roles: int = 2, - task_prompt: Union[str, TextPrompt] = "", ) -> Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]: r"""Generate role names based on the input task prompt. Args: - num_roles (int, optional): The number of roles to generate. - (default: :obj:`2`) task_prompt (Union[str, TextPrompt]): The prompt for the task based on which the roles are to be generated. + num_roles (int, optional): The number of roles to generate. + (default: :obj:`2`) Returns: Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]: A tuple diff --git a/examples/ai_society/role_assignment.py b/examples/ai_society/role_assignment.py index 7c2a1023e..7188bc0ec 100644 --- a/examples/ai_society/role_assignment.py +++ b/examples/ai_society/role_assignment.py @@ -26,7 +26,7 @@ def main(model_type=None, num_roles=3) -> None: role_names, role_description_dict, _, _ = ( role_description_agent.run_role_with_description( - num_roles=num_roles, task_prompt=task_prompt)) + task_prompt=task_prompt, num_roles=num_roles)) if (len(role_names) != num_roles): raise ValueError(f"Length of role_names ({len(role_names)}) " diff --git a/examples/ai_society/role_playing_with_role_generation.py b/examples/ai_society/role_playing_with_role_generation.py index e1f7d5931..ec72d808c 100644 --- a/examples/ai_society/role_playing_with_role_generation.py +++ b/examples/ai_society/role_playing_with_role_generation.py @@ -31,7 +31,7 @@ def main(model_type=None) -> None: role_names, role_description_dict, _, _ = ( role_description_agent.run_role_with_description( - num_roles=2, task_prompt=task_prompt)) + task_prompt=task_prompt, num_roles=2)) ai_assistant_role = role_names[AI_ASSISTANT_ROLE_INDEX] ai_user_role = role_names[AI_USER_ROLE_INDEX] From 07adeb07a9322efedc6c5b57bb8885f7255ace4c Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Fri, 18 Aug 2023 23:34:45 +0200 Subject: [PATCH 12/33] Craete the new TaskType.ROLE_DESCRIPTION --- camel/generators.py | 5 -- camel/prompts/__init__.py | 2 + camel/prompts/prompt_templates.py | 18 +---- .../role_description_prompt_template.py | 54 +++++++++++++++ camel/prompts/task_prompt_template.py | 3 + camel/societies/role_playing.py | 68 +++++++------------ camel/typing.py | 1 + 7 files changed, 88 insertions(+), 63 deletions(-) create mode 100644 camel/prompts/role_description_prompt_template.py diff --git a/camel/generators.py b/camel/generators.py index ac17a89fd..2f94375b0 100644 --- a/camel/generators.py +++ b/camel/generators.py @@ -29,8 +29,6 @@ class SystemMessageGenerator: sys_msg_meta_dict_keys (Optional[Set[str]], optional): The set of keys of the meta dictionary used to fill the prompts. (default: :obj:`None`) - with_role_description (bool, optional): Whether to include the role - description in the system message. 
(default: :obj:`False`) """ def __init__( @@ -38,7 +36,6 @@ def __init__( task_type: TaskType = TaskType.AI_SOCIETY, sys_prompts: Optional[Dict[RoleType, str]] = None, sys_msg_meta_dict_keys: Optional[Set[str]] = None, - with_role_description: bool = False, ) -> None: self.sys_prompts: Dict[RoleType, str] @@ -50,12 +47,10 @@ def __init__( ).get_system_prompt( task_type, RoleType.ASSISTANT, - with_role_description=with_role_description, ) user_prompt_template = PromptTemplateGenerator().get_system_prompt( task_type, RoleType.USER, - with_role_description=with_role_description, ) critic_prompt_template = PromptTemplateGenerator( ).get_system_prompt( diff --git a/camel/prompts/__init__.py b/camel/prompts/__init__.py index 35fde16d6..49f9d6f35 100644 --- a/camel/prompts/__init__.py +++ b/camel/prompts/__init__.py @@ -18,6 +18,7 @@ from .translation import TranslationPromptTemplateDict from .solution_extraction import SolutionExtractionPromptTemplateDict from .evaluation import EvaluationPromptTemplateDict +from .role_description_prompt_template import RoleDescriptionPromptTemplateDict from .task_prompt_template import TaskPromptTemplateDict from .prompt_templates import PromptTemplateGenerator @@ -30,6 +31,7 @@ 'MisalignmentPromptTemplateDict', 'TranslationPromptTemplateDict', 'EvaluationPromptTemplateDict', + 'RoleDescriptionPromptTemplateDict', 'TaskPromptTemplateDict', 'PromptTemplateGenerator', 'SolutionExtractionPromptTemplateDict', diff --git a/camel/prompts/prompt_templates.py b/camel/prompts/prompt_templates.py index b471253da..96e0c1ad0 100644 --- a/camel/prompts/prompt_templates.py +++ b/camel/prompts/prompt_templates.py @@ -38,7 +38,6 @@ def get_prompt_from_key( self, task_type: TaskType, key: Any, - with_role_description: bool = False, ) -> TextPrompt: r"""Generates a text prompt using the specified :obj:`task_type` and :obj:`key`. @@ -46,8 +45,6 @@ def get_prompt_from_key( Args: task_type (TaskType): The type of task. key (Any): The key used to generate the prompt. - with_role_description (bool, optional): Whether to include the - role description in the generated prompt. Defaults to False. Returns: TextPrompt: The generated text prompt. @@ -57,14 +54,7 @@ def get_prompt_from_key( :obj:`task_type` and :obj:`key`. """ try: - if with_role_description: - role_description = self.task_prompt_template_dict[task_type][ - "role_description"] - task_prompt_template = self.task_prompt_template_dict[ - task_type][key] - return TextPrompt(role_description + task_prompt_template) - else: - return self.task_prompt_template_dict[task_type][key] + return self.task_prompt_template_dict[task_type][key] except KeyError: raise KeyError("Failed to get generate prompt template for " @@ -74,7 +64,6 @@ def get_system_prompt( self, task_type: TaskType, role_type: RoleType, - with_role_description: bool = False, ) -> TextPrompt: r"""Generates a text prompt for the system role, using the specified :obj:`task_type` and :obj:`role_type`. @@ -83,8 +72,6 @@ def get_system_prompt( task_type (TaskType): The type of task. role_type (RoleType): The type of role, either "USER" or "ASSISTANT". - with_role_description (bool, optional): Whether to include the - role description in the generated prompt. Defaults to False. Returns: TextPrompt: The generated text prompt. @@ -94,8 +81,7 @@ def get_system_prompt( :obj:`task_type` and :obj:`role_type`. 
""" try: - return self.get_prompt_from_key(task_type, role_type, - with_role_description) + return self.get_prompt_from_key(task_type, role_type) except KeyError: prompt = "You are a helpful assistant." diff --git a/camel/prompts/role_description_prompt_template.py b/camel/prompts/role_description_prompt_template.py new file mode 100644 index 000000000..3a11f2cd0 --- /dev/null +++ b/camel/prompts/role_description_prompt_template.py @@ -0,0 +1,54 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +from typing import Any + +from camel.prompts import AISocietyPromptTemplateDict, TextPrompt +from camel.typing import RoleType + + +# flake8: noqa :E501 +class RoleDescriptionPromptTemplateDict(AISocietyPromptTemplateDict): + r"""A dictionary containing :obj:`TextPrompt` used in the `role description` + task. + + Attributes: + DEFAULT_ROLE_DESCRIPTION_PROMPT (TextPrompt): A default prompt to + describe the role descriptions. + ASSISTANT_PROMPT (TextPrompt): A system prompt for the AI assistant + that outlines the rules of the conversation and provides + instructions for completing tasks. + USER_PROMPT (TextPrompt): A system prompt for the AI user that + outlines the rules of the conversation and provides instructions + for giving instructions to the AI assistant. 
+ """ + DEFAULT_ROLE_DESCRIPTION_PROMPT = TextPrompt( + """===== ROLES WITH DESCRIPTION ===== +{user_role} and {assistant_role} are collaborating to complete a task: {task} +{user_role}'s competencies, professional characteristics, duties and workflows to complete the task: {user_description} +{assistant_role}'s competencies, professional characteristics, duties and workflows to complete the task: {assistant_description} +""") + + ASSISTANT_PROMPT = TextPrompt(DEFAULT_ROLE_DESCRIPTION_PROMPT + + AISocietyPromptTemplateDict.ASSISTANT_PROMPT) + + USER_PROMPT = TextPrompt(DEFAULT_ROLE_DESCRIPTION_PROMPT + + AISocietyPromptTemplateDict.USER_PROMPT) + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.update({ + "default_role_description": self.DEFAULT_ROLE_DESCRIPTION_PROMPT, + RoleType.ASSISTANT: self.ASSISTANT_PROMPT, + RoleType.USER: self.USER_PROMPT, + }) diff --git a/camel/prompts/task_prompt_template.py b/camel/prompts/task_prompt_template.py index 30bb2f062..aaaef9dd6 100644 --- a/camel/prompts/task_prompt_template.py +++ b/camel/prompts/task_prompt_template.py @@ -18,6 +18,7 @@ CodePromptTemplateDict, EvaluationPromptTemplateDict, MisalignmentPromptTemplateDict, + RoleDescriptionPromptTemplateDict, SolutionExtractionPromptTemplateDict, TextPromptDict, TranslationPromptTemplateDict, @@ -50,4 +51,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: EvaluationPromptTemplateDict(), TaskType.SOLUTION_EXTRACTION: SolutionExtractionPromptTemplateDict(), + TaskType.ROLE_DESCRIPTION: + RoleDescriptionPromptTemplateDict(), }) diff --git a/camel/societies/role_playing.py b/camel/societies/role_playing.py index 16298f644..5038c94d0 100644 --- a/camel/societies/role_playing.py +++ b/camel/societies/role_playing.py @@ -11,6 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== +import warnings from typing import Dict, List, Optional, Sequence, Tuple, Union from camel.agents import ( @@ -121,28 +122,24 @@ def __init__( self.init_planned_task_prompt(task_planner_agent_kwargs, output_language) - if (assistant_agent_kwargs is not None - and "role_description" in assistant_agent_kwargs - and user_agent_kwargs is not None - and "role_description" in user_agent_kwargs): - with_role_description = True - else: - with_role_description = False - sys_msg_generator = SystemMessageGenerator( - task_type=self.task_type, - with_role_description=with_role_description, - **(sys_msg_generator_kwargs or {})) - - assistant_description = (None if assistant_agent_kwargs is None else - assistant_agent_kwargs.get( - "role_description", None)) - user_description = (None if user_agent_kwargs is None else - user_agent_kwargs.get("role_description", None)) + task_type=self.task_type, **(sys_msg_generator_kwargs or {})) + + if self.task_type == TaskType.ROLE_DESCRIPTION: + if not (assistant_agent_kwargs is not None and "role_description" + in assistant_agent_kwargs and user_agent_kwargs is not None + and "role_description" in user_agent_kwargs): + raise ValueError( + "Ensure both `role_description` of the assistant and " + "the user are not None.") + if (self.task_type != TaskType.ROLE_DESCRIPTION + and ("role_description" in assistant_agent_kwargs + or "role_description" in user_agent_kwargs)): + warnings.warn("Role description is unused.") + (init_assistant_sys_msg, init_user_sys_msg, sys_msg_meta_dicts) = self.get_sys_message_info( assistant_role_name, user_role_name, sys_msg_generator, - assistant_description, user_description, extend_sys_msg_meta_dicts) self.assistant_agent: ChatAgent @@ -239,8 +236,6 @@ def get_sys_message_info( assistant_role_name: str, user_role_name: str, sys_msg_generator: SystemMessageGenerator, - assistant_description: Optional[str] = None, - user_description: Optional[str] = None, extend_sys_msg_meta_dicts: Optional[List[Dict]] = None, ) -> Tuple[BaseMessage, BaseMessage, List[Dict]]: r"""Get initial assistant and user system message with a list of @@ -252,9 +247,6 @@ def get_sys_message_info( user_role_name (str): The name of the role played by the user. sys_msg_generator (SystemMessageGenerator): A system message generator for agents. - assistant_description (str, optional): The description of the - assistant. - user_description (str, optional): The description of the user. extend_sys_msg_meta_dicts (List[Dict], optional): A list of dicts to extend the system message meta dicts with. @@ -264,25 +256,17 @@ def get_sys_message_info( initial system message, and a list of system message meta dicts. 
""" sys_msg_meta_dicts = [dict(task=self.task_prompt) for _ in range(2)] - if (extend_sys_msg_meta_dicts is None and self.task_type - in [TaskType.AI_SOCIETY, TaskType.MISALIGNMENT]): - if (assistant_description is not None - and user_description is not None): - extend_sys_msg_meta_dicts = [ - dict(assistant_role=assistant_role_name, - user_role=user_role_name, - assistant_description=assistant_description, - user_description=user_description) for _ in range(2) - ] - elif (assistant_description is None and user_description is None): - extend_sys_msg_meta_dicts = [ - dict(assistant_role=assistant_role_name, - user_role=user_role_name) for _ in range(2) - ] - else: - raise ValueError( - "Both assistant and user descriptions should " - "either be 'None' or both should have values.") + if (extend_sys_msg_meta_dicts is None and self.task_type in [ + TaskType.AI_SOCIETY, TaskType.MISALIGNMENT, + TaskType.ROLE_DESCRIPTION + ]): + extend_sys_msg_meta_dicts = [ + dict(assistant_role=assistant_role_name, + user_role=user_role_name) for _ in range(2) + ] + else: + raise ValueError("Both assistant and user descriptions should " + "either be 'None' or both should have values.") if extend_sys_msg_meta_dicts is not None: sys_msg_meta_dicts = [{ diff --git a/camel/typing.py b/camel/typing.py index 1b274d02e..dae8a5184 100644 --- a/camel/typing.py +++ b/camel/typing.py @@ -60,6 +60,7 @@ class TaskType(Enum): TRANSLATION = "translation" EVALUATION = "evaluation" SOLUTION_EXTRACTION = "solution_extraction" + ROLE_DESCRIPTION = "role_description" DEFAULT = "default" From 98bf40aefb2898a071af0ffa7be58e74bc34a57c Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Fri, 18 Aug 2023 23:36:03 +0200 Subject: [PATCH 13/33] Add examples and tests for the new TaskType --- .../role_generation.py} | 0 .../role_playing_with_role_description.py} | 2 ++ examples/test/test_ai_society_example.py | 13 --------- examples/test/test_role_description.py | 29 +++++++++++++++++++ 4 files changed, 31 insertions(+), 13 deletions(-) rename examples/{ai_society/role_assignment.py => role_description/role_generation.py} (100%) rename examples/{ai_society/role_playing_with_role_generation.py => role_description/role_playing_with_role_description.py} (97%) create mode 100644 examples/test/test_role_description.py diff --git a/examples/ai_society/role_assignment.py b/examples/role_description/role_generation.py similarity index 100% rename from examples/ai_society/role_assignment.py rename to examples/role_description/role_generation.py diff --git a/examples/ai_society/role_playing_with_role_generation.py b/examples/role_description/role_playing_with_role_description.py similarity index 97% rename from examples/ai_society/role_playing_with_role_generation.py rename to examples/role_description/role_playing_with_role_description.py index ec72d808c..235daeda4 100644 --- a/examples/ai_society/role_playing_with_role_generation.py +++ b/examples/role_description/role_playing_with_role_description.py @@ -16,6 +16,7 @@ from camel.agents.role_assignment import RoleAssignmentAgent from camel.configs import ChatGPTConfig from camel.societies import RolePlaying +from camel.typing import TaskType from camel.utils import print_text_animated AI_ASSISTANT_ROLE_INDEX = 0 @@ -38,6 +39,7 @@ def main(model_type=None) -> None: role_play_session = RolePlaying( task_prompt=task_prompt, + task_type=TaskType.ROLE_DESCRIPTION, # important for role description with_task_specify=True, assistant_role_name=ai_assistant_role, user_role_name=ai_user_role, diff --git 
a/examples/test/test_ai_society_example.py b/examples/test/test_ai_society_example.py index 4a7d884f0..89410bcb2 100644 --- a/examples/test/test_ai_society_example.py +++ b/examples/test/test_ai_society_example.py @@ -13,9 +13,7 @@ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== from mock import patch -import examples.ai_society.role_assignment import examples.ai_society.role_playing -import examples.ai_society.role_playing_with_role_generation import examples.function_call.role_playing_with_function from camel.typing import ModelType @@ -25,17 +23,6 @@ def test_ai_society_role_playing_example(): examples.ai_society.role_playing.main(ModelType.STUB) -def test_ai_society_role_assignment_example(): - with patch('time.sleep', return_value=None): - examples.ai_society.role_assignment.main(ModelType.GPT_3_5_TURBO) - - -def test_role_playing_with_role_generation_example(): - with patch('time.sleep', return_value=None): - examples.ai_society.role_playing_with_role_generation.main( - ModelType.GPT_3_5_TURBO) - - def test_role_playing_with_function_example(): with patch('time.sleep', return_value=None): examples.function_call.role_playing_with_function.main(ModelType.STUB) diff --git a/examples/test/test_role_description.py b/examples/test/test_role_description.py new file mode 100644 index 000000000..4ef5cc451 --- /dev/null +++ b/examples/test/test_role_description.py @@ -0,0 +1,29 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== +from mock import patch + +import examples.role_description.role_generation +import examples.role_description.role_playing_with_role_description +from camel.typing import ModelType + + +def test_role_generation_example(): + with patch('time.sleep', return_value=None): + examples.role_description.role_generation.main(ModelType.GPT_3_5_TURBO) + + +def test_role_playing_with_role_description_example(): + with patch('time.sleep', return_value=None): + examples.role_description.role_playing_with_role_description.main( + ModelType.GPT_3_5_TURBO) From 7291e763f6f5ed6f595c7b856775b593682db8a1 Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Fri, 18 Aug 2023 23:49:52 +0200 Subject: [PATCH 14/33] Update --- camel/societies/role_playing.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/camel/societies/role_playing.py b/camel/societies/role_playing.py index 5038c94d0..1a856b407 100644 --- a/camel/societies/role_playing.py +++ b/camel/societies/role_playing.py @@ -133,8 +133,8 @@ def __init__( "Ensure both `role_description` of the assistant and " "the user are not None.") if (self.task_type != TaskType.ROLE_DESCRIPTION - and ("role_description" in assistant_agent_kwargs - or "role_description" in user_agent_kwargs)): + and ("role_description" in (assistant_agent_kwargs or {}) + or "role_description" in (user_agent_kwargs or {}))): warnings.warn("Role description is unused.") (init_assistant_sys_msg, init_user_sys_msg, @@ -264,9 +264,6 @@ def get_sys_message_info( dict(assistant_role=assistant_role_name, user_role=user_role_name) for _ in range(2) ] - else: - raise ValueError("Both assistant and user descriptions should " - "either be 'None' or both should have values.") if extend_sys_msg_meta_dicts is not None: sys_msg_meta_dicts = [{ From a1fd3ccb41ee7a9b5ee6c3a66eab918256daf842 Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Sat, 19 Aug 2023 00:07:31 +0200 Subject: [PATCH 15/33] Update --- camel/agents/chat_agent.py | 4 ---- camel/prompts/prompt_templates.py | 8 ++------ 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 7d47ea7f0..7b0226eff 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -122,8 +122,6 @@ class ChatAgent(BaseAgent): agent. (default: :obj:`None`) function_list (Optional[List[OpenAIFunction]]): List of available :obj:`OpenAIFunction`. (default: :obj:`None`) - role_description (str, optional): Description of the role - :obj:`str`. 
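The `patch('time.sleep', return_value=None)` wrapper in these example tests exists because the role-generation call is guarded by a tenacity retry with exponential backoff (waits between roughly 5 and 60 seconds per attempt), and tenacity sleeps through `time.sleep` by default; stubbing the sleep keeps a retried run from stalling the suite. A minimal standalone illustration of the same idea, with a made-up flaky function rather than the real agent:

    import time
    from unittest import mock

    from tenacity import retry, stop_after_attempt, wait_exponential

    calls = []

    @retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5))
    def flaky_call():
        calls.append(1)
        if len(calls) < 3:
            raise RuntimeError("transient failure")
        return "ok"

    with mock.patch("time.sleep", return_value=None):
        assert flaky_call() == "ok"  # three attempts, but no real waiting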
(default: :obj:`None`) """ def __init__( @@ -134,14 +132,12 @@ def __init__( message_window_size: Optional[int] = None, output_language: Optional[str] = None, function_list: Optional[List[OpenAIFunction]] = None, - role_description: Optional[str] = None, ) -> None: self.orig_sys_message: BaseMessage = system_message self.system_message = system_message self.role_name: str = system_message.role_name self.role_type: RoleType = system_message.role_type - self.role_description: Optional[str] = role_description self.output_language: Optional[str] = output_language if self.output_language is not None: self.set_output_language(self.output_language) diff --git a/camel/prompts/prompt_templates.py b/camel/prompts/prompt_templates.py index 96e0c1ad0..005fb94e8 100644 --- a/camel/prompts/prompt_templates.py +++ b/camel/prompts/prompt_templates.py @@ -34,11 +34,7 @@ def __init__( self.task_prompt_template_dict = (task_prompt_template_dict or TaskPromptTemplateDict()) - def get_prompt_from_key( - self, - task_type: TaskType, - key: Any, - ) -> TextPrompt: + def get_prompt_from_key(self, task_type: TaskType, key: Any) -> TextPrompt: r"""Generates a text prompt using the specified :obj:`task_type` and :obj:`key`. @@ -87,7 +83,7 @@ def get_system_prompt( prompt = "You are a helpful assistant." warnings.warn("Failed to get system prompt template for " - f"task: {task_type.value}, role: {role_type}. " + f"task: {task_type.value}, role: {role_type.value}. " f"Set template to: {prompt}") return TextPrompt(prompt) From ee5f8252749b2de8199c83c7ccfe2d245255493a Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Sat, 19 Aug 2023 00:26:27 +0200 Subject: [PATCH 16/33] Update --- camel/agents/chat_agent.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 7b0226eff..7d47ea7f0 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -122,6 +122,8 @@ class ChatAgent(BaseAgent): agent. (default: :obj:`None`) function_list (Optional[List[OpenAIFunction]]): List of available :obj:`OpenAIFunction`. (default: :obj:`None`) + role_description (str, optional): Description of the role + :obj:`str`. (default: :obj:`None`) """ def __init__( @@ -132,12 +134,14 @@ def __init__( message_window_size: Optional[int] = None, output_language: Optional[str] = None, function_list: Optional[List[OpenAIFunction]] = None, + role_description: Optional[str] = None, ) -> None: self.orig_sys_message: BaseMessage = system_message self.system_message = system_message self.role_name: str = system_message.role_name self.role_type: RoleType = system_message.role_type + self.role_description: Optional[str] = role_description self.output_language: Optional[str] = output_language if self.output_language is not None: self.set_output_language(self.output_language) From 3ab14d73d0985871e5e830d5a5a9dc05f70df1c0 Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Mon, 21 Aug 2023 00:41:26 +0200 Subject: [PATCH 17/33] Update --- camel/agents/chat_agent.py | 4 -- camel/agents/role_assignment.py | 30 +++++++------ .../role_description_prompt_template.py | 4 +- camel/societies/role_playing.py | 43 ++++++++++++------- .../role_playing_with_role_description.py | 14 +++--- 5 files changed, 54 insertions(+), 41 deletions(-) diff --git a/camel/agents/chat_agent.py b/camel/agents/chat_agent.py index 7d47ea7f0..7b0226eff 100644 --- a/camel/agents/chat_agent.py +++ b/camel/agents/chat_agent.py @@ -122,8 +122,6 @@ class ChatAgent(BaseAgent): agent. 
(default: :obj:`None`) function_list (Optional[List[OpenAIFunction]]): List of available :obj:`OpenAIFunction`. (default: :obj:`None`) - role_description (str, optional): Description of the role - :obj:`str`. (default: :obj:`None`) """ def __init__( @@ -134,14 +132,12 @@ def __init__( message_window_size: Optional[int] = None, output_language: Optional[str] = None, function_list: Optional[List[OpenAIFunction]] = None, - role_description: Optional[str] = None, ) -> None: self.orig_sys_message: BaseMessage = system_message self.system_message = system_message self.role_name: str = system_message.role_name self.role_type: RoleType = system_message.role_type - self.role_description: Optional[str] = role_description self.output_language: Optional[str] = output_language if self.output_language is not None: self.set_output_language(self.output_language) diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment.py index 13551b35a..6d03dd936 100644 --- a/camel/agents/role_assignment.py +++ b/camel/agents/role_assignment.py @@ -71,18 +71,19 @@ def run_role_with_description( """ self.reset() - expert_prompt = "\n".join( - f"Domain expert {i + 1}: <|blank|>\n" - f"Associated competencies, professional characteristics, duties " - f"and workflows: <|blank|>. End.\n" for i in range(num_roles or 0)) + expert_prompt = "===== ANSWER PROMPT =====\n" + "\n".join( + f"Domain expert {i + 1}: \n" + f"Associated competencies, characteristics, duties " + f"and workflows: . End." for i in range(num_roles or 0)) role_assignment_generation_prompt = TextPrompt( - "You are the boss, you need to recruit experts in {num_roles} " + - "different fields to solve the task.\n" + - "Please tell me which domain experts should be recruited, " + - "and what competencies, professional characteristics, duties " + - "and workflows to complete the task.\n" + - "ONLY return the content in BLANK.\n\n" + "===== TASK =====\n" + - "{task}\n\n" + "===== PROMPT =====\n" + expert_prompt) + "You are the boss, and you're in charge of recruiting " + + "{num_roles} experts for the following task." + + "\n==== TASK =====\n {task}\n\n" + + "Identify the domain experts you'd recruit and detail their " + + "associated competencies, characteristics, duties and workflows " + + "to complete the task.\n " + + "Your answer MUST adhere to the format of ANSWER PROMPT, and " + + "ONLY answer the BLANKs.\n" + expert_prompt) role_assignment_generation = role_assignment_generation_prompt.format( num_roles=num_roles, task=task_prompt) @@ -106,13 +107,14 @@ def run_role_with_description( ] role_descriptions = [ desc.replace("<|", "").replace("|>", "") for desc in re.findall( - r"Associated competencies, professional characteristics, " - r"duties and workflows: (.+?) End.", output_completion.content, + r"Associated competencies, characteristics, " + r"duties and workflows:(.+?) End.", output_completion.content, re.DOTALL) ] if len(role_names) != num_roles or len(role_descriptions) != num_roles: - raise RuntimeError("Got None or insufficient Role messages. 
") + raise RuntimeError( + "Got None or insufficient information of roles.") if terminated: raise RuntimeError("Role assignment failed.") diff --git a/camel/prompts/role_description_prompt_template.py b/camel/prompts/role_description_prompt_template.py index 3a11f2cd0..b3cdc6bec 100644 --- a/camel/prompts/role_description_prompt_template.py +++ b/camel/prompts/role_description_prompt_template.py @@ -35,8 +35,8 @@ class RoleDescriptionPromptTemplateDict(AISocietyPromptTemplateDict): DEFAULT_ROLE_DESCRIPTION_PROMPT = TextPrompt( """===== ROLES WITH DESCRIPTION ===== {user_role} and {assistant_role} are collaborating to complete a task: {task} -{user_role}'s competencies, professional characteristics, duties and workflows to complete the task: {user_description} -{assistant_role}'s competencies, professional characteristics, duties and workflows to complete the task: {assistant_description} +{user_role}'s competencies, characteristics, duties and workflows to complete the task: {user_description} +{assistant_role}'s competencies, characteristics, duties and workflows to complete the task: {assistant_description} """) ASSISTANT_PROMPT = TextPrompt(DEFAULT_ROLE_DESCRIPTION_PROMPT + diff --git a/camel/societies/role_playing.py b/camel/societies/role_playing.py index 1a856b407..2f14146f0 100644 --- a/camel/societies/role_playing.py +++ b/camel/societies/role_playing.py @@ -11,7 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -import warnings from typing import Dict, List, Optional, Sequence, Tuple, Union from camel.agents import ( @@ -125,18 +124,6 @@ def __init__( sys_msg_generator = SystemMessageGenerator( task_type=self.task_type, **(sys_msg_generator_kwargs or {})) - if self.task_type == TaskType.ROLE_DESCRIPTION: - if not (assistant_agent_kwargs is not None and "role_description" - in assistant_agent_kwargs and user_agent_kwargs is not None - and "role_description" in user_agent_kwargs): - raise ValueError( - "Ensure both `role_description` of the assistant and " - "the user are not None.") - if (self.task_type != TaskType.ROLE_DESCRIPTION - and ("role_description" in (assistant_agent_kwargs or {}) - or "role_description" in (user_agent_kwargs or {}))): - warnings.warn("Role description is unused.") - (init_assistant_sys_msg, init_user_sys_msg, sys_msg_meta_dicts) = self.get_sys_message_info( assistant_role_name, user_role_name, sys_msg_generator, @@ -257,13 +244,39 @@ def get_sys_message_info( """ sys_msg_meta_dicts = [dict(task=self.task_prompt) for _ in range(2)] if (extend_sys_msg_meta_dicts is None and self.task_type in [ - TaskType.AI_SOCIETY, TaskType.MISALIGNMENT, - TaskType.ROLE_DESCRIPTION + TaskType.AI_SOCIETY, + TaskType.MISALIGNMENT, ]): extend_sys_msg_meta_dicts = [ dict(assistant_role=assistant_role_name, user_role=user_role_name) for _ in range(2) ] + elif (self.task_type == TaskType.ROLE_DESCRIPTION): + if (extend_sys_msg_meta_dicts is None + or len(extend_sys_msg_meta_dicts) != 2): + # In `TaskType.ROLE_DESCRIPTION`, `extend_sys_msg_meta_dicts` + # should have two elements, one for assistant and one for user + raise ValueError("`extend_sys_msg_meta_dicts` should have two " + "elements for `TaskType.ROLE_DESCRIPTION`.") + # Validate `extend_sys_msg_meta_dicts` has `assistant_description` + # and `user_description` + if ("assistant_description" not in extend_sys_msg_meta_dicts[0] + or "user_description" not in 
extend_sys_msg_meta_dicts[0] + or "assistant_description" + not in extend_sys_msg_meta_dicts[1] + or "user_description" not in extend_sys_msg_meta_dicts[1]): + raise ValueError("Ensure both `assistant_description` and " + "`user_description` are not None.") + + role_name_msg_meta_dicts = [ + dict(assistant_role=assistant_role_name, + user_role=user_role_name) for _ in range(2) + ] + extend_sys_msg_meta_dicts = [{ + **role_name_msg_meta_dict, + **sys_msg_meta_dict + } for role_name_msg_meta_dict, sys_msg_meta_dict in zip( + role_name_msg_meta_dicts, extend_sys_msg_meta_dicts)] if extend_sys_msg_meta_dicts is not None: sys_msg_meta_dicts = [{ diff --git a/examples/role_description/role_playing_with_role_description.py b/examples/role_description/role_playing_with_role_description.py index 235daeda4..97802a5a4 100644 --- a/examples/role_description/role_playing_with_role_description.py +++ b/examples/role_description/role_playing_with_role_description.py @@ -36,6 +36,13 @@ def main(model_type=None) -> None: ai_assistant_role = role_names[AI_ASSISTANT_ROLE_INDEX] ai_user_role = role_names[AI_USER_ROLE_INDEX] + ai_assistant_description = role_description_dict[ai_assistant_role] + ai_user_description = role_description_dict[ai_user_role] + + sys_msg_meta_dicts = [ + dict(assistant_description=ai_assistant_description, + user_description=ai_user_description) for _ in range(2) + ] role_play_session = RolePlaying( task_prompt=task_prompt, @@ -43,13 +50,8 @@ def main(model_type=None) -> None: with_task_specify=True, assistant_role_name=ai_assistant_role, user_role_name=ai_user_role, - assistant_agent_kwargs=dict( - model=model_type, - role_description=role_description_dict[ai_assistant_role]), - user_agent_kwargs=dict( - model=model_type, - role_description=role_description_dict[ai_user_role]), task_specify_agent_kwargs=dict(model=model_type), + extend_sys_msg_meta_dicts=sys_msg_meta_dicts, ) print( From e45309880571a2ace9c0de18476d84beea52076b Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Mon, 21 Aug 2023 10:40:02 +0200 Subject: [PATCH 18/33] Rename to role_assignment_agent --- camel/agents/{role_assignment.py => role_assignment_agent.py} | 0 examples/role_description/role_generation.py | 2 +- examples/role_description/role_playing_with_role_description.py | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename camel/agents/{role_assignment.py => role_assignment_agent.py} (100%) diff --git a/camel/agents/role_assignment.py b/camel/agents/role_assignment_agent.py similarity index 100% rename from camel/agents/role_assignment.py rename to camel/agents/role_assignment_agent.py diff --git a/examples/role_description/role_generation.py b/examples/role_description/role_generation.py index 7188bc0ec..3ae7ded3f 100644 --- a/examples/role_description/role_generation.py +++ b/examples/role_description/role_generation.py @@ -13,7 +13,7 @@ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== from colorama import Fore -from camel.agents.role_assignment import RoleAssignmentAgent +from camel.agents.role_assignment_agent import RoleAssignmentAgent from camel.configs import ChatGPTConfig diff --git a/examples/role_description/role_playing_with_role_description.py b/examples/role_description/role_playing_with_role_description.py index 97802a5a4..6fb3ae3b7 100644 --- a/examples/role_description/role_playing_with_role_description.py +++ b/examples/role_description/role_playing_with_role_description.py @@ -13,7 +13,7 @@ # =========== Copyright 2023 @ CAMEL-AI.org. 
All Rights Reserved. =========== from colorama import Fore -from camel.agents.role_assignment import RoleAssignmentAgent +from camel.agents.role_assignment_agent import RoleAssignmentAgent from camel.configs import ChatGPTConfig from camel.societies import RolePlaying from camel.typing import TaskType From 473b8a4ae21acc524905f434d5a20d6695396eb3 Mon Sep 17 00:00:00 2001 From: Appointat <65004114+Appointat@users.noreply.github.com> Date: Wed, 30 Aug 2023 12:21:18 +0200 Subject: [PATCH 19/33] Update the branch feature/role-generation from master (#265) Co-authored-by: zhiyu-01 <121875294+zhiyu-01@users.noreply.github.com> Co-authored-by: Guohao Li Co-authored-by: MorphlingEd Co-authored-by: Tianqi Xu <40522713+dandansamax@users.noreply.github.com> Co-authored-by: Wenxuan Li <55635778+MorphlingEd@users.noreply.github.com> --- camel/configs.py | 4 + camel/functions/search_functions.py | 99 ++++--------------- camel/societies/role_playing.py | 18 ---- .../role_playing_with_function.py | 23 ++++- pyproject.toml | 4 + test/agents/test_role_playing.py | 31 ++++++ test/functions/test_search_functions.py | 47 +++------ 7 files changed, 91 insertions(+), 135 deletions(-) diff --git a/camel/configs.py b/camel/configs.py index a75c6a509..ec9d455e7 100644 --- a/camel/configs.py +++ b/camel/configs.py @@ -108,6 +108,7 @@ def from_openai_function_list( cls, function_list: List[OpenAIFunction], function_call: Union[Dict[str, str], str] = "auto", + kwargs: Optional[Dict[str, Any]] = None, ): r"""Class method for creating an instance given the function-related arguments. @@ -118,6 +119,8 @@ def from_openai_function_list( function_call (Union[Dict[str, str], str], optional): Controls how the model responds to function calls, as specified in the creator's documentation. + kwargs (Optional[Dict[str, Any]]): The extra modifications to be + made on the original settings defined in :obj:`ChatGPTConfig`. Return: FunctionCallingConfig: A new instance which loads the given @@ -127,4 +130,5 @@ def from_openai_function_list( return cls( functions=[func.as_dict() for func in function_list], function_call=function_call, + **(kwargs or {}), ) diff --git a/camel/functions/search_functions.py b/camel/functions/search_functions.py index 53b77ae49..b3b213500 100644 --- a/camel/functions/search_functions.py +++ b/camel/functions/search_functions.py @@ -13,98 +13,37 @@ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== from typing import List -import requests -from bs4 import BeautifulSoup +import wikipedia from .openai_function import OpenAIFunction -def clean_str(p: str) -> str: - r"""Cleans the input string by encoding and decoding it multiple times - to ensure it can be properly read and processed by Python code. - - Args: - p (str): The input string to be cleaned, typically from the webpage. - - Returns: - str: The cleaned string. - """ - return p.encode().decode("unicode-" - "escape").encode("latin1").decode("utf-8") - - -def get_page_abstract(page: str) -> str: - r"""Returns the first :obj:`5` sentences of the fetched page. - - Args: - page (str): The fetched page. - - Returns: - str: The concatenation of the first :obj:`5` sentences in the - given page. - """ - paragraphs = page.split('\n') - paragraphs = [p.strip() for p in paragraphs if p.strip()] - - # find all sentences - sentences = [] - for p in paragraphs: - sents = p.split('. ') - if sents[-1].endswith('.'): - sents[-1] = sents[-1][:-1] - sentences += sents - sentences = [s.strip() + '.' 
for s in sentences if s.strip()] - - return ' '.join(sentences[:5]) - - def search_wiki(entity: str) -> str: - r"""Search the entity in WikiPedia and return (the first :obj:`5` - sentences of) the required page, containing factual information - about the given entity. + r"""Search the entity in WikiPedia and return the summary of the + required page, containing factual information about the given entity. Args: entity (string): The entity to be searched. Returns: string: The search result. If the page corresponding to the entity - exists, return the first :obj:`5` sentences in a string. + exists, return the summary of this entity in a string. """ - entity_ = entity.replace(" ", "+") - search_url = f"https://en.wikipedia.org/w/index.php?search={entity_}" - - # request the target page - response_text = requests.get(search_url).text - - # parse the obtained page - soup = BeautifulSoup(response_text, features="html.parser") - result_divs = soup.find_all("div", {"class": "mw-search-result-heading"}) - - observation: str - if result_divs: - # only similar concepts exist - result_titles = [ - clean_str(div.get_text().strip()) for div in result_divs - ] - observation = (f"Could not find {entity}. " - f"Similar: {result_titles[:5]}.") - else: - # the page corresponding to the entity exists - page = [ - p.get_text().strip() - for p in soup.find_all("p") + soup.find_all("ul") - ] - - res_page = "" - for p in page: - if len(p.split(" ")) > 2: - res_page += clean_str(p) - if not p.endswith("\n"): - res_page += "\n" - - observation = get_page_abstract(res_page) - - return observation + result: str + + try: + result = wikipedia.summary(entity, sentences=5, auto_suggest=False) + except wikipedia.exceptions.DisambiguationError as e: + result = wikipedia.summary(e.options[0], sentences=5, + auto_suggest=False) + except wikipedia.exceptions.PageError: + result = ("There is no page in Wikipedia corresponding to entity " + f"{entity}, please specify another word to describe the" + " entity to be searched.") + except wikipedia.exceptions.WikipediaException as e: + result = f"An exception occurred during the search: {e}" + + return result SEARCH_FUNCS: List[OpenAIFunction] = [ diff --git a/camel/societies/role_playing.py b/camel/societies/role_playing.py index 2f14146f0..493b0ebd8 100644 --- a/camel/societies/role_playing.py +++ b/camel/societies/role_playing.py @@ -20,8 +20,6 @@ TaskSpecifyAgent, ) from camel.agents.chat_agent import ChatAgentResponse -from camel.configs import FunctionCallingConfig -from camel.functions import OpenAIFunction from camel.generators import SystemMessageGenerator from camel.human import Human from camel.messages import BaseMessage @@ -73,9 +71,6 @@ class RolePlaying: task specify meta dict with. (default: :obj:`None`) output_language (str, optional): The language to be output by the agents. (default: :obj:`None`) - assistant_functions (list, optional): List of - :obj:`OpenAIFunction` objects to be loaded. If not specified, - function calling will be disabled. 
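The rewritten `search_wiki` above delegates fetching and summarizing to the `wikipedia` package instead of scraping pages with requests and BeautifulSoup, which is why the old `clean_str` and `get_page_abstract` helpers can be dropped. A short usage sketch, assuming the optional `wikipedia` dependency from pyproject.toml is installed (outputs paraphrased):

    from camel.functions.search_functions import search_wiki

    # Exact page title: returns the first five sentences of the page summary.
    print(search_wiki("Erygia sigillata"))

    # Ambiguous title: DisambiguationError is caught and the first listed
    # option (for "New York" that is "New York (state)") is summarized instead.
    print(search_wiki("New York"))

    # No matching page: a descriptive message is returned rather than raising.
    print(search_wiki("Some entity with no Wikipedia page"))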
(default: :obj:`None`) """ def __init__( @@ -100,7 +95,6 @@ def __init__( extend_sys_msg_meta_dicts: Optional[List[Dict]] = None, extend_task_specify_meta_dict: Optional[Dict] = None, output_language: Optional[str] = None, - assistant_functions: Optional[List[OpenAIFunction]] = None, ) -> None: self.with_task_specify = with_task_specify self.with_task_planner = with_task_planner @@ -109,8 +103,6 @@ def __init__( self.task_type = task_type self.task_prompt = task_prompt - self.assistant_functions = assistant_functions - self.specified_task_prompt: Optional[TextPrompt] = None self.init_specified_task_prompt(assistant_role_name, user_role_name, task_specify_agent_kwargs, @@ -317,14 +309,6 @@ def init_agents( output_language (str, optional): The language to be output by the agents. """ - if self.assistant_functions is not None: - assistant_config = FunctionCallingConfig.from_openai_function_list( - function_list=self.assistant_functions, - function_call="auto", - ) - else: - assistant_config = None - if self.model_type is not None: if assistant_agent_kwargs is None: assistant_agent_kwargs = {} @@ -335,9 +319,7 @@ def init_agents( self.assistant_agent = ChatAgent( init_assistant_sys_msg, - model_config=assistant_config, output_language=output_language, - function_list=self.assistant_functions, **(assistant_agent_kwargs or {}), ) self.assistant_sys_msg = self.assistant_agent.system_message diff --git a/examples/function_call/role_playing_with_function.py b/examples/function_call/role_playing_with_function.py index 09695972b..a79410454 100644 --- a/examples/function_call/role_playing_with_function.py +++ b/examples/function_call/role_playing_with_function.py @@ -16,6 +16,7 @@ from colorama import Fore from camel.agents.chat_agent import FunctionCallingRecord +from camel.configs import ChatGPTConfig, FunctionCallingConfig from camel.functions import MATH_FUNCS, SEARCH_FUNCS from camel.societies import RolePlaying from camel.typing import ModelType @@ -25,15 +26,29 @@ def main(model_type=ModelType.GPT_4) -> None: task_prompt = ("Assuming the current year is 2023, estimate KAUST's " "current age and then add 10 more years to this age.") + + user_model_config = ChatGPTConfig(temperature=0.0) + + function_list = [*MATH_FUNCS, *SEARCH_FUNCS] + assistant_model_config = FunctionCallingConfig.from_openai_function_list( + function_list=function_list, + kwargs=dict(temperature=0.0), + ) + role_play_session = RolePlaying( assistant_role_name="Searcher", user_role_name="Professor", - assistant_agent_kwargs=dict(model=model_type), - user_agent_kwargs=dict(model=model_type), + assistant_agent_kwargs=dict( + model=model_type, + model_config=assistant_model_config, + function_list=function_list, + ), + user_agent_kwargs=dict( + model=model_type, + model_config=user_model_config, + ), task_prompt=task_prompt, with_task_specify=False, - task_specify_agent_kwargs=dict(model=model_type), - assistant_functions=[*MATH_FUNCS, *SEARCH_FUNCS], ) print( diff --git a/pyproject.toml b/pyproject.toml index 47aa5b5a5..4d69c5d8f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,6 +43,7 @@ torch = { version = "^1", optional = true } soundfile = { version = "^0", optional = true } sentencepiece = { version = "^0", optional = true } opencv-python = { version = "^4", optional = true } +wikipedia = { version = "^1", optional = true } [tool.poetry.extras] huggingface-agent = [ @@ -54,6 +55,7 @@ huggingface-agent = [ "soundfile", "sentencepiece", "opencv-python", + "wikipedia", ] all = [ @@ -65,6 +67,7 @@ all = [ "soundfile", 
"sentencepiece", "opencv-python", + "wikipedia", ] [tool.poetry.group.dev] @@ -128,5 +131,6 @@ module = [ "database_connection", "huggingface_hub", "huggingface_hub.utils._errors", + "wikipedia", ] ignore_missing_imports = true diff --git a/test/agents/test_role_playing.py b/test/agents/test_role_playing.py index dca115dc5..390a74f8f 100644 --- a/test/agents/test_role_playing.py +++ b/test/agents/test_role_playing.py @@ -14,6 +14,8 @@ import pytest from camel.agents import ChatAgent, CriticAgent +from camel.configs import FunctionCallingConfig +from camel.functions import MATH_FUNCS from camel.human import Human from camel.messages import BaseMessage from camel.societies import RolePlaying @@ -115,3 +117,32 @@ def test_role_playing_step(task_type, extend_sys_msg_meta_dicts, assert isinstance(response.terminated, bool) assert response.terminated is False assert isinstance(response.info, dict) + + +@pytest.mark.model_backend +def test_role_playing_with_function(): + function_list = [*MATH_FUNCS] + assistant_model_config = FunctionCallingConfig.from_openai_function_list( + function_list=function_list) + + role_playing = RolePlaying( + assistant_role_name="AI Assistant", + assistant_agent_kwargs=dict(model=ModelType.GPT_3_5_TURBO, + model_config=assistant_model_config, + function_list=function_list), + user_role_name="AI User", + user_agent_kwargs=dict(model=ModelType.GPT_3_5_TURBO), + task_prompt="Perform the task", + task_specify_agent_kwargs=dict(model=ModelType.GPT_3_5_TURBO), + task_type=TaskType.AI_SOCIETY, + ) + + init_assistant_msg, _ = role_playing.init_chat() + assistant_response, user_response = role_playing.step(init_assistant_msg) + for response in (assistant_response, user_response): + assert isinstance(response.msgs, list) + assert len(response.msgs) == 1 + assert isinstance(response.msgs[0], BaseMessage) + assert isinstance(response.terminated, bool) + assert response.terminated is False + assert isinstance(response.info, dict) diff --git a/test/functions/test_search_functions.py b/test/functions/test_search_functions.py index 0a5b7f05a..3852410e0 100644 --- a/test/functions/test_search_functions.py +++ b/test/functions/test_search_functions.py @@ -11,44 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -from camel.functions.search_functions import ( - clean_str, - get_page_abstract, - search_wiki, -) +import wikipedia - -def test_clean_str(): - input_str = "Some escaped string with unicode characters: \u2019" - expected_output = "Some escaped string with unicode characters: ’" - assert clean_str(input_str) == expected_output - - -def test_get_page_abstract(): - input_page = "\n".join([ - "This is the first sentence", "This is the second sentence", - "This is the third sentence", "This is the fourth sentence", - "This is the fifth sentence", "This is the sixth sentence" - ]) - expected_output = ( - "This is the first sentence. This is the second sentence. " - "This is the third sentence. This is the fourth sentence. " - "This is the fifth sentence.") - - assert get_page_abstract(input_page) == expected_output +from camel.functions.search_functions import search_wiki def test_search_wiki_normal(): expected_output = ( "Erygia sigillata is a species of moth in the family Erebidae found " - "in Himachal Pradesh, Northern India.[2] The moth was officially " - "recognized and classified in 1892. This Erebinae-related article " - "is a stub. 
You can help Wikipedia by expanding it. Main " - "pageContentsCurrent eventsRandom articleAbout WikipediaContact " - "usDonate. HelpLearn to editCommunity portalRecent " - "changesUpload file.") - - # Test that `search_wiki` returns the expected output + "in Himachal Pradesh, Northern India. The moth was officially " + "recognized and classified in 1892.") + assert search_wiki("Erygia sigillata") == expected_output @@ -56,4 +29,12 @@ def test_search_wiki_not_found(): search_output = search_output = search_wiki( "South Africa Women Football Team") assert search_output.startswith( - "Could not find South Africa Women Football Team.") + "There is no page in Wikipedia corresponding to entity South Africa " + "Women Football Team, please specify another word to describe the " + "entity to be searched.") + + +def test_search_wiki_with_ambiguity(): + expected_output = wikipedia.summary("New York (state)", sentences=5, + auto_suggest=False) + assert search_wiki("New York") == expected_output From 84b45b2fd22f8b3f802352774c833e079dc90b04 Mon Sep 17 00:00:00 2001 From: Appointat <65004114+Appointat@users.noreply.github.com> Date: Wed, 30 Aug 2023 12:28:21 +0200 Subject: [PATCH 20/33] Update the branch feature/role-generation from master (#266) Co-authored-by: zhiyu-01 <121875294+zhiyu-01@users.noreply.github.com> Co-authored-by: Guohao Li Co-authored-by: MorphlingEd Co-authored-by: Tianqi Xu <40522713+dandansamax@users.noreply.github.com> Co-authored-by: Wenxuan Li <55635778+MorphlingEd@users.noreply.github.com> From dec6340586fcd45115ec0ed532bae9d2512a5f70 Mon Sep 17 00:00:00 2001 From: Appointat <65004114+Appointat@users.noreply.github.com> Date: Thu, 31 Aug 2023 11:36:05 +0200 Subject: [PATCH 21/33] Update camel/societies/role_playing.py Co-authored-by: Guohao Li --- camel/societies/role_playing.py | 1 + 1 file changed, 1 insertion(+) diff --git a/camel/societies/role_playing.py b/camel/societies/role_playing.py index 493b0ebd8..f1a68e7bc 100644 --- a/camel/societies/role_playing.py +++ b/camel/societies/role_playing.py @@ -238,6 +238,7 @@ def get_sys_message_info( if (extend_sys_msg_meta_dicts is None and self.task_type in [ TaskType.AI_SOCIETY, TaskType.MISALIGNMENT, + TaskType.ROLE_DESCRIPTION, ]): extend_sys_msg_meta_dicts = [ dict(assistant_role=assistant_role_name, From 6bdfee3bc866ac788775ed4051144912d982fe8e Mon Sep 17 00:00:00 2001 From: Appointat <65004114+Appointat@users.noreply.github.com> Date: Thu, 31 Aug 2023 11:36:25 +0200 Subject: [PATCH 22/33] Update camel/agents/role_assignment_agent.py Co-authored-by: Guohao Li --- camel/agents/role_assignment_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/camel/agents/role_assignment_agent.py b/camel/agents/role_assignment_agent.py index 6d03dd936..b95cb2292 100644 --- a/camel/agents/role_assignment_agent.py +++ b/camel/agents/role_assignment_agent.py @@ -76,7 +76,7 @@ def run_role_with_description( f"Associated competencies, characteristics, duties " f"and workflows: . End." for i in range(num_roles or 0)) role_assignment_generation_prompt = TextPrompt( - "You are the boss, and you're in charge of recruiting " + + "You are a role assignment agent, and you're in charge of recruiting " + "{num_roles} experts for the following task." 
+ "\n==== TASK =====\n {task}\n\n" + "Identify the domain experts you'd recruit and detail their " + From bbb9d5f4b95020af48c7d5b9535e0e2bcba156ec Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Thu, 31 Aug 2023 11:47:44 +0200 Subject: [PATCH 23/33] Update --- camel/agents/role_assignment_agent.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/camel/agents/role_assignment_agent.py b/camel/agents/role_assignment_agent.py index 6d03dd936..f78f4f647 100644 --- a/camel/agents/role_assignment_agent.py +++ b/camel/agents/role_assignment_agent.py @@ -40,10 +40,6 @@ def __init__( model: ModelType = ModelType.GPT_3_5_TURBO, model_config: Optional[Any] = None, ) -> None: - self.role_assignment_prompt = TextPrompt( - 'Given this task, "{task}", generate two role names, ' + - 'one for the AI user and one for the AI assistant.') - system_message = BaseMessage( role_name="Role Assigner", role_type=RoleType.ASSISTANT, @@ -68,6 +64,9 @@ def run_role_with_description( Returns: Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]: A tuple + containing the generated role names, the generated role + descriptions, whether the role assignment is terminated, and + additional information. """ self.reset() @@ -90,12 +89,11 @@ def run_role_with_description( role_assignment_generation_msg = BaseMessage.make_user_message( role_name="Role Assigner", content=role_assignment_generation) - response_completion = super().step( - input_message=role_assignment_generation_msg) + response = super().step(input_message=role_assignment_generation_msg) - output_completion = response_completion.msg # type: BaseMessage - terminated = response_completion.terminated - info = response_completion.info + output_completion = response.msg # type: BaseMessage + terminated = response.terminated + info = response.info # Distribute the output completions into role names and descriptions role_names = [ From b760a7df0eea199212ac91e18fee1d8ec6f8ff0b Mon Sep 17 00:00:00 2001 From: Appointat <65004114+Appointat@users.noreply.github.com> Date: Thu, 31 Aug 2023 16:23:06 +0200 Subject: [PATCH 24/33] Update camel/prompts/role_description_prompt_template.py Co-authored-by: Guohao Li --- camel/prompts/role_description_prompt_template.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/camel/prompts/role_description_prompt_template.py b/camel/prompts/role_description_prompt_template.py index b3cdc6bec..af5d361a7 100644 --- a/camel/prompts/role_description_prompt_template.py +++ b/camel/prompts/role_description_prompt_template.py @@ -34,9 +34,9 @@ class RoleDescriptionPromptTemplateDict(AISocietyPromptTemplateDict): """ DEFAULT_ROLE_DESCRIPTION_PROMPT = TextPrompt( """===== ROLES WITH DESCRIPTION ===== -{user_role} and {assistant_role} are collaborating to complete a task: {task} -{user_role}'s competencies, characteristics, duties and workflows to complete the task: {user_description} -{assistant_role}'s competencies, characteristics, duties and workflows to complete the task: {assistant_description} +{user_role} and {assistant_role} are collaborating to complete a task: {task}. +Competencies, characteristics, duties, and workflows of {user_role} for the task: {user_description}. +Competencies, characteristics, duties, and workflows of {assistant_role} for the task: {assistant_description}. 
""") ASSISTANT_PROMPT = TextPrompt(DEFAULT_ROLE_DESCRIPTION_PROMPT + From cdef22fdfa25eed5d25d4575d884807eb0bb27cc Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Thu, 31 Aug 2023 18:06:53 +0200 Subject: [PATCH 25/33] Update --- camel/agents/role_assignment_agent.py | 24 +++++++-------- camel/prompts/ai_society.py | 7 ----- .../role_description_prompt_template.py | 13 ++++---- .../role_playing_with_role_description.py | 14 ++++----- .../test_role_description_prompt_template.py | 30 +++++++++++++++++++ 5 files changed, 53 insertions(+), 35 deletions(-) create mode 100644 test/prompts/test_role_description_prompt_template.py diff --git a/camel/agents/role_assignment_agent.py b/camel/agents/role_assignment_agent.py index 0617b55b5..12c708d07 100644 --- a/camel/agents/role_assignment_agent.py +++ b/camel/agents/role_assignment_agent.py @@ -12,7 +12,7 @@ # limitations under the License. # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== import re -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, Optional, Union from tenacity import retry, stop_after_attempt, wait_exponential @@ -53,7 +53,7 @@ def run_role_with_description( self, task_prompt: Union[str, TextPrompt], num_roles: int = 2, - ) -> Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]: + ) -> Dict[str, str]: r"""Generate role names based on the input task prompt. Args: @@ -63,10 +63,8 @@ def run_role_with_description( (default: :obj:`2`) Returns: - Tuple[List[str], Dict[str, str], bool, Dict[str, Any]]: A tuple - containing the generated role names, the generated role - descriptions, whether the role assignment is terminated, and - additional information. + Dict[str, str]: A dictionary mapping role names to their + descriptions. """ self.reset() @@ -75,8 +73,8 @@ def run_role_with_description( f"Associated competencies, characteristics, duties " f"and workflows: . End." for i in range(num_roles or 0)) role_assignment_generation_prompt = TextPrompt( - "You are a role assignment agent, and you're in charge of recruiting " + - "{num_roles} experts for the following task." + + "You are a role assignment agent, and you're in charge of " + + "recruiting {num_roles} experts for the following task." + "\n==== TASK =====\n {task}\n\n" + "Identify the domain experts you'd recruit and detail their " + "associated competencies, characteristics, duties and workflows " + @@ -91,23 +89,21 @@ def run_role_with_description( response = super().step(input_message=role_assignment_generation_msg) - output_completion = response.msg # type: BaseMessage + msg = response.msg # type: BaseMessage terminated = response.terminated - info = response.info # Distribute the output completions into role names and descriptions role_names = [ desc.replace("<|", "").replace("|>", "") for desc in re.findall( r"Domain expert \d: (.+?)\nAssociated competencies,", - output_completion.content, + msg.content, re.DOTALL, ) ] role_descriptions = [ desc.replace("<|", "").replace("|>", "") for desc in re.findall( r"Associated competencies, characteristics, " - r"duties and workflows:(.+?) End.", output_completion.content, - re.DOTALL) + r"duties and workflows:(.+?) 
End.", msg.content, re.DOTALL) ] if len(role_names) != num_roles or len(role_descriptions) != num_roles: @@ -121,4 +117,4 @@ def run_role_with_description( for role_name, description in zip(role_names, role_descriptions) } - return role_names, role_descriptions_dict, terminated, info + return role_descriptions_dict diff --git a/camel/prompts/ai_society.py b/camel/prompts/ai_society.py index 59f84d1ad..0e01cca2d 100644 --- a/camel/prompts/ai_society.py +++ b/camel/prompts/ai_society.py @@ -58,12 +58,6 @@ class AISocietyPromptTemplateDict(TextPromptDict): Please reply with the specified task in {word_limit} words or less. Do not add anything else.""" ) - ROLE_DESCRIPTION_PROMPT = TextPrompt("""===== ROLES WITH DESCRIPTION ===== -{user_role} and {assistant_role} are collaborating to complete a task: {task} -{user_role}'s competencies, professional characteristics, duties and workflows to complete the task: {user_description} -{assistant_role}'s competencies, professional characteristics, duties and workflows to complete the task: {assistant_description} -""") - ASSISTANT_PROMPT: TextPrompt = TextPrompt("""===== RULES OF ASSISTANT ===== Never forget you are a {assistant_role} and I am a {user_role}. Never flip roles! Never instruct me! We share a common interest in collaborating to successfully complete a task. @@ -121,7 +115,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: "generate_users": self.GENERATE_USERS, "generate_tasks": self.GENERATE_TASKS, "task_specify_prompt": self.TASK_SPECIFY_PROMPT, - "role_description": self.ROLE_DESCRIPTION_PROMPT, RoleType.ASSISTANT: self.ASSISTANT_PROMPT, RoleType.USER: self.USER_PROMPT, RoleType.CRITIC: self.CRITIC_PROMPT, diff --git a/camel/prompts/role_description_prompt_template.py b/camel/prompts/role_description_prompt_template.py index b3cdc6bec..3656fb4fa 100644 --- a/camel/prompts/role_description_prompt_template.py +++ b/camel/prompts/role_description_prompt_template.py @@ -23,7 +23,7 @@ class RoleDescriptionPromptTemplateDict(AISocietyPromptTemplateDict): task. Attributes: - DEFAULT_ROLE_DESCRIPTION_PROMPT (TextPrompt): A default prompt to + ROLE_DESCRIPTION_PROMPT (TextPrompt): A default prompt to describe the role descriptions. ASSISTANT_PROMPT (TextPrompt): A system prompt for the AI assistant that outlines the rules of the conversation and provides @@ -32,23 +32,22 @@ class RoleDescriptionPromptTemplateDict(AISocietyPromptTemplateDict): outlines the rules of the conversation and provides instructions for giving instructions to the AI assistant. """ - DEFAULT_ROLE_DESCRIPTION_PROMPT = TextPrompt( - """===== ROLES WITH DESCRIPTION ===== -{user_role} and {assistant_role} are collaborating to complete a task: {task} + ROLE_DESCRIPTION_PROMPT = TextPrompt("""===== ROLES WITH DESCRIPTION ===== +{user_role} and {assistant_role} are collaborating to complete a task: {task}. 
{user_role}'s competencies, characteristics, duties and workflows to complete the task: {user_description} {assistant_role}'s competencies, characteristics, duties and workflows to complete the task: {assistant_description} """) - ASSISTANT_PROMPT = TextPrompt(DEFAULT_ROLE_DESCRIPTION_PROMPT + + ASSISTANT_PROMPT = TextPrompt(ROLE_DESCRIPTION_PROMPT + AISocietyPromptTemplateDict.ASSISTANT_PROMPT) - USER_PROMPT = TextPrompt(DEFAULT_ROLE_DESCRIPTION_PROMPT + + USER_PROMPT = TextPrompt(ROLE_DESCRIPTION_PROMPT + AISocietyPromptTemplateDict.USER_PROMPT) def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.update({ - "default_role_description": self.DEFAULT_ROLE_DESCRIPTION_PROMPT, + "role_description": self.ROLE_DESCRIPTION_PROMPT, RoleType.ASSISTANT: self.ASSISTANT_PROMPT, RoleType.USER: self.USER_PROMPT, }) diff --git a/examples/role_description/role_playing_with_role_description.py b/examples/role_description/role_playing_with_role_description.py index 6fb3ae3b7..98fc46367 100644 --- a/examples/role_description/role_playing_with_role_description.py +++ b/examples/role_description/role_playing_with_role_description.py @@ -30,12 +30,12 @@ def main(model_type=None) -> None: role_description_agent = RoleAssignmentAgent( model=model_type, model_config=model_config_description) - role_names, role_description_dict, _, _ = ( - role_description_agent.run_role_with_description( - task_prompt=task_prompt, num_roles=2)) + role_description_dict = (role_description_agent.run_role_with_description( + task_prompt=task_prompt, num_roles=2)) - ai_assistant_role = role_names[AI_ASSISTANT_ROLE_INDEX] - ai_user_role = role_names[AI_USER_ROLE_INDEX] + ai_assistant_role = list( + role_description_dict.keys())[AI_ASSISTANT_ROLE_INDEX] + ai_user_role = list(role_description_dict.keys())[AI_USER_ROLE_INDEX] ai_assistant_description = role_description_dict[ai_assistant_role] ai_user_description = role_description_dict[ai_user_role] @@ -45,11 +45,11 @@ def main(model_type=None) -> None: ] role_play_session = RolePlaying( + assistant_role_name=ai_assistant_role, + user_role_name=ai_user_role, task_prompt=task_prompt, task_type=TaskType.ROLE_DESCRIPTION, # important for role description with_task_specify=True, - assistant_role_name=ai_assistant_role, - user_role_name=ai_user_role, task_specify_agent_kwargs=dict(model=model_type), extend_sys_msg_meta_dicts=sys_msg_meta_dicts, ) diff --git a/test/prompts/test_role_description_prompt_template.py b/test/prompts/test_role_description_prompt_template.py new file mode 100644 index 000000000..7295f7fa0 --- /dev/null +++ b/test/prompts/test_role_description_prompt_template.py @@ -0,0 +1,30 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== +from camel.prompts import RoleDescriptionPromptTemplateDict, TextPrompt +from camel.typing import RoleType + + +def test_ai_society_prompt_template_dict(): + template_dict = RoleDescriptionPromptTemplateDict() + + # Test if the prompts are of the correct type + assert isinstance(template_dict.ROLE_DESCRIPTION_PROMPT, TextPrompt) + assert isinstance(template_dict.ASSISTANT_PROMPT, TextPrompt) + assert isinstance(template_dict.USER_PROMPT, TextPrompt) + + # Test if the prompts are correctly added to the dictionary + assert template_dict[ + "role_description"] == template_dict.ROLE_DESCRIPTION_PROMPT + assert template_dict[RoleType.ASSISTANT] == template_dict.ASSISTANT_PROMPT + assert template_dict[RoleType.USER] == template_dict.USER_PROMPT From 2eccbb060f06262a7b930ae1b57f31a9c8b1cf4c Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Thu, 31 Aug 2023 18:24:45 +0200 Subject: [PATCH 26/33] Update --- examples/role_description/role_generation.py | 14 +++++++------- .../role_playing_with_role_description.py | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/role_description/role_generation.py b/examples/role_description/role_generation.py index 3ae7ded3f..c16d3401f 100644 --- a/examples/role_description/role_generation.py +++ b/examples/role_description/role_generation.py @@ -24,17 +24,17 @@ def main(model_type=None, num_roles=3) -> None: role_description_agent = RoleAssignmentAgent( model=model_type, model_config=model_config_description) - role_names, role_description_dict, _, _ = ( - role_description_agent.run_role_with_description( - task_prompt=task_prompt, num_roles=num_roles)) + role_description_dict = (role_description_agent.run_role_with_description( + task_prompt=task_prompt, num_roles=num_roles)) - if (len(role_names) != num_roles): - raise ValueError(f"Length of role_names ({len(role_names)}) " - f"does not equal to num_roles ({num_roles}).") + if (len(role_description_dict) != num_roles): + raise ValueError( + f"Length of role_names ({len(role_description_dict)}) " + f"does not equal to num_roles ({num_roles}).") print(Fore.YELLOW + f"Original task prompt:\n{task_prompt}\n") print(Fore.GREEN + f"List of {num_roles} roles with description:") - for role_name in role_names: + for role_name in role_description_dict.keys(): print(Fore.BLUE + f"{role_name}:\n" f"{role_description_dict[role_name]}\n") diff --git a/examples/role_description/role_playing_with_role_description.py b/examples/role_description/role_playing_with_role_description.py index 98fc46367..a42ca344d 100644 --- a/examples/role_description/role_playing_with_role_description.py +++ b/examples/role_description/role_playing_with_role_description.py @@ -59,10 +59,10 @@ def main(model_type=None) -> None: f"AI Assistant sys message:\n{role_play_session.assistant_sys_msg}\n") print(Fore.BLUE + f"AI User sys message:\n{role_play_session.user_sys_msg}\n") - print(Fore.GREEN + f"AI Assistant role description:\n" + print(Fore.GREEN + f"Role description of AI Assistant:\n" f"{role_play_session.assistant_sys_msg.role_name}\n" f"{role_description_dict[ai_assistant_role]}\n") - print(Fore.BLUE + f"AI User role description:\n" + print(Fore.BLUE + f"Role description of AI User:\n" f"{role_play_session.user_sys_msg.role_name}\n" f"{role_description_dict[ai_user_role]}\n") From 982523a6faf2d9a37b4c7ee85b1881ffea8ec368 Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Thu, 31 Aug 2023 18:25:06 +0200 Subject: [PATCH 27/33] Update --- examples/role_description/role_generation.py | 4 ++-- 1 file changed, 
2 insertions(+), 2 deletions(-) diff --git a/examples/role_description/role_generation.py b/examples/role_description/role_generation.py index c16d3401f..59fc8f845 100644 --- a/examples/role_description/role_generation.py +++ b/examples/role_description/role_generation.py @@ -24,8 +24,8 @@ def main(model_type=None, num_roles=3) -> None: role_description_agent = RoleAssignmentAgent( model=model_type, model_config=model_config_description) - role_description_dict = (role_description_agent.run_role_with_description( - task_prompt=task_prompt, num_roles=num_roles)) + role_description_dict = role_description_agent.run_role_with_description( + task_prompt=task_prompt, num_roles=num_roles) if (len(role_description_dict) != num_roles): raise ValueError( From c01db699f194e8134b9c7a3126ecb40580c4ad19 Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Thu, 31 Aug 2023 18:50:07 +0200 Subject: [PATCH 28/33] Update --- .../role_description/role_playing_with_role_description.py | 6 ++++-- examples/test/test_role_description.py | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/examples/role_description/role_playing_with_role_description.py b/examples/role_description/role_playing_with_role_description.py index a42ca344d..05acc0942 100644 --- a/examples/role_description/role_playing_with_role_description.py +++ b/examples/role_description/role_playing_with_role_description.py @@ -23,12 +23,13 @@ AI_USER_ROLE_INDEX = 1 -def main(model_type=None) -> None: +def main(model_type_for_role_generation=None, model_type=None) -> None: task_prompt = "Develop a trading bot for the stock market." model_config_description = ChatGPTConfig() role_description_agent = RoleAssignmentAgent( - model=model_type, model_config=model_config_description) + model=model_type_for_role_generation, + model_config=model_config_description) role_description_dict = (role_description_agent.run_role_with_description( task_prompt=task_prompt, num_roles=2)) @@ -45,6 +46,7 @@ def main(model_type=None) -> None: ] role_play_session = RolePlaying( + model_type=model_type, assistant_role_name=ai_assistant_role, user_role_name=ai_user_role, task_prompt=task_prompt, diff --git a/examples/test/test_role_description.py b/examples/test/test_role_description.py index 4ef5cc451..fd42664a7 100644 --- a/examples/test/test_role_description.py +++ b/examples/test/test_role_description.py @@ -20,10 +20,10 @@ def test_role_generation_example(): with patch('time.sleep', return_value=None): - examples.role_description.role_generation.main(ModelType.GPT_3_5_TURBO) + examples.role_description.role_generation.main(ModelType.STUB) def test_role_playing_with_role_description_example(): with patch('time.sleep', return_value=None): examples.role_description.role_playing_with_role_description.main( - ModelType.GPT_3_5_TURBO) + ModelType.GPT_3_5_TURBO, ModelType.STUB) From 47d08a9a04a439f880e05733e2c30fddae2a72b6 Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Thu, 31 Aug 2023 18:54:42 +0200 Subject: [PATCH 29/33] Update --- examples/test/test_role_description.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/test/test_role_description.py b/examples/test/test_role_description.py index fd42664a7..35c31d70b 100644 --- a/examples/test/test_role_description.py +++ b/examples/test/test_role_description.py @@ -20,7 +20,7 @@ def test_role_generation_example(): with patch('time.sleep', return_value=None): - examples.role_description.role_generation.main(ModelType.STUB) + 
examples.role_description.role_generation.main(ModelType.GPT_3_5_TURBO) def test_role_playing_with_role_description_example(): From 6531ab4e63ef7f222ee70b6cd1d5290472fc598c Mon Sep 17 00:00:00 2001 From: Zikang CHEN Date: Fri, 1 Sep 2023 09:07:51 +0200 Subject: [PATCH 30/33] Update --- camel/agents/role_assignment_agent.py | 2 +- ...on.py => test_role_description_example.py} | 0 test/agents/test_role_assignment_agent.py | 83 +++++++++++++++++++ 3 files changed, 84 insertions(+), 1 deletion(-) rename examples/test/{test_role_description.py => test_role_description_example.py} (100%) create mode 100644 test/agents/test_role_assignment_agent.py diff --git a/camel/agents/role_assignment_agent.py b/camel/agents/role_assignment_agent.py index 12c708d07..159971215 100644 --- a/camel/agents/role_assignment_agent.py +++ b/camel/agents/role_assignment_agent.py @@ -103,7 +103,7 @@ def run_role_with_description( role_descriptions = [ desc.replace("<|", "").replace("|>", "") for desc in re.findall( r"Associated competencies, characteristics, " - r"duties and workflows:(.+?) End.", msg.content, re.DOTALL) + r"duties and workflows: (.+?) End.", msg.content, re.DOTALL) ] if len(role_names) != num_roles or len(role_descriptions) != num_roles: diff --git a/examples/test/test_role_description.py b/examples/test/test_role_description_example.py similarity index 100% rename from examples/test/test_role_description.py rename to examples/test/test_role_description_example.py diff --git a/test/agents/test_role_assignment_agent.py b/test/agents/test_role_assignment_agent.py new file mode 100644 index 000000000..53ad2c367 --- /dev/null +++ b/test/agents/test_role_assignment_agent.py @@ -0,0 +1,83 @@ +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +# Licensed under the Apache License, Version 2.0 (the “License”); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an “AS IS” BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== +import pytest +from mock import patch + +from camel.agents.chat_agent import ChatAgent, ChatAgentResponse +from camel.agents.role_assignment_agent import RoleAssignmentAgent +from camel.configs import ChatGPTConfig +from camel.messages import BaseMessage +from camel.typing import ModelType, RoleType + + +@patch.object(ChatAgent, 'step') +@pytest.mark.parametrize("model_type", [None, ModelType.GPT_3_5_TURBO]) +@pytest.mark.parametrize("num_roles", [1, 2, 3]) +def test_role_assignment_agent(mock_step, model_type, num_roles): + mock_content = generate_mock_content(num_roles) + mock_msg = BaseMessage(role_name="Role Assigner", + role_type=RoleType.ASSISTANT, meta_dict=None, + content=mock_content) + + # Mock the step function + mock_step.return_value = ChatAgentResponse(msgs=[mock_msg], + terminated=False, info={}) + + task_prompt = "Develop a trading bot for the stock market." 
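The whitespace fix above (matching "workflows: (.+?)" rather than "workflows:(.+?)") matters because the agent recovers structure from free text. A small standalone sketch of how the two regexes turn a completion in the ANSWER PROMPT format into a role dictionary, using content similar to the mock data in this test:

    import re

    content = (
        "Domain expert 1: Trading Strategist\n"
        "Associated competencies, characteristics, duties and workflows: "
        "Design and backtest trading strategies. End.\n"
        "Domain expert 2: Data Scientist\n"
        "Associated competencies, characteristics, duties and workflows: "
        "Analyze market data and build predictive models. End."
    )

    role_names = re.findall(
        r"Domain expert \d: (.+?)\nAssociated competencies,", content, re.DOTALL)
    role_descriptions = re.findall(
        r"Associated competencies, characteristics, "
        r"duties and workflows: (.+?) End.", content, re.DOTALL)

    print(dict(zip(role_names, role_descriptions)))
    # {'Trading Strategist': 'Design and backtest trading strategies.',
    #  'Data Scientist': 'Analyze market data and build predictive models.'}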
+ model_config_description = ChatGPTConfig() + + # Construct role assignment agent + role_description_agent = RoleAssignmentAgent( + model=model_type, model_config=model_config_description) + + # Generate the role description dictionary based on the mock step function + role_description_dict = role_description_agent.run_role_with_description( + task_prompt, num_roles) + + expected_dict = generate_expected_dict(num_roles) + + assert role_description_dict == expected_dict + + +# Generate mock content according to the number of roles +def generate_mock_content(num_roles): + roles_with_descriptions = [ + ("Trading Strategist", "Design trading strategies. End."), + ("Data Scientist", "Analyze market data. End."), + ("Software Developer", "Implement trading algorithms. End.") + ] + + content = [] + for i in range(num_roles): + role_name, role_desc = roles_with_descriptions[i] + content.append( + f"Domain expert {i + 1}: {role_name}\n" + f"Associated competencies, characteristics, duties and workflows: " + f"{role_desc}. End.") + + return "\n".join(content) + + +# Generate expected dictionary according to the number of roles +def generate_expected_dict(num_roles): + roles_with_descriptions = { + "Trading Strategist": "Design trading strategies.", + "Data Scientist": "Analyze market data.", + "Software Developer": "Implement trading algorithms." + } + + return { + key: roles_with_descriptions[key] + for key in list(roles_with_descriptions.keys())[:num_roles] + } From daab66cfed52be9fe88e7289f035721206fd1799 Mon Sep 17 00:00:00 2001 From: lig Date: Sat, 9 Sep 2023 00:36:09 +0300 Subject: [PATCH 31/33] Remove role description specific code in role playing --- camel/agents/role_assignment_agent.py | 2 +- camel/societies/role_playing.py | 27 ------------------- examples/role_description/role_generation.py | 4 +-- .../role_playing_with_role_description.py | 9 ++++--- test/agents/test_role_assignment_agent.py | 3 +-- 5 files changed, 9 insertions(+), 36 deletions(-) diff --git a/camel/agents/role_assignment_agent.py b/camel/agents/role_assignment_agent.py index 159971215..45c7ce0f2 100644 --- a/camel/agents/role_assignment_agent.py +++ b/camel/agents/role_assignment_agent.py @@ -49,7 +49,7 @@ def __init__( super().__init__(system_message, model, model_config) @retry(wait=wait_exponential(min=5, max=60), stop=stop_after_attempt(5)) - def run_role_with_description( + def run( self, task_prompt: Union[str, TextPrompt], num_roles: int = 2, diff --git a/camel/societies/role_playing.py b/camel/societies/role_playing.py index f1a68e7bc..d65786736 100644 --- a/camel/societies/role_playing.py +++ b/camel/societies/role_playing.py @@ -238,38 +238,11 @@ def get_sys_message_info( if (extend_sys_msg_meta_dicts is None and self.task_type in [ TaskType.AI_SOCIETY, TaskType.MISALIGNMENT, - TaskType.ROLE_DESCRIPTION, ]): extend_sys_msg_meta_dicts = [ dict(assistant_role=assistant_role_name, user_role=user_role_name) for _ in range(2) ] - elif (self.task_type == TaskType.ROLE_DESCRIPTION): - if (extend_sys_msg_meta_dicts is None - or len(extend_sys_msg_meta_dicts) != 2): - # In `TaskType.ROLE_DESCRIPTION`, `extend_sys_msg_meta_dicts` - # should have two elements, one for assistant and one for user - raise ValueError("`extend_sys_msg_meta_dicts` should have two " - "elements for `TaskType.ROLE_DESCRIPTION`.") - # Validate `extend_sys_msg_meta_dicts` has `assistant_description` - # and `user_description` - if ("assistant_description" not in extend_sys_msg_meta_dicts[0] - or "user_description" not in 
extend_sys_msg_meta_dicts[0] - or "assistant_description" - not in extend_sys_msg_meta_dicts[1] - or "user_description" not in extend_sys_msg_meta_dicts[1]): - raise ValueError("Ensure both `assistant_description` and " - "`user_description` are not None.") - - role_name_msg_meta_dicts = [ - dict(assistant_role=assistant_role_name, - user_role=user_role_name) for _ in range(2) - ] - extend_sys_msg_meta_dicts = [{ - **role_name_msg_meta_dict, - **sys_msg_meta_dict - } for role_name_msg_meta_dict, sys_msg_meta_dict in zip( - role_name_msg_meta_dicts, extend_sys_msg_meta_dicts)] if extend_sys_msg_meta_dicts is not None: sys_msg_meta_dicts = [{ diff --git a/examples/role_description/role_generation.py b/examples/role_description/role_generation.py index 59fc8f845..94a65b230 100644 --- a/examples/role_description/role_generation.py +++ b/examples/role_description/role_generation.py @@ -24,8 +24,8 @@ def main(model_type=None, num_roles=3) -> None: role_description_agent = RoleAssignmentAgent( model=model_type, model_config=model_config_description) - role_description_dict = role_description_agent.run_role_with_description( - task_prompt=task_prompt, num_roles=num_roles) + role_description_dict = role_description_agent.run(task_prompt=task_prompt, + num_roles=num_roles) if (len(role_description_dict) != num_roles): raise ValueError( diff --git a/examples/role_description/role_playing_with_role_description.py b/examples/role_description/role_playing_with_role_description.py index 05acc0942..a11b7530d 100644 --- a/examples/role_description/role_playing_with_role_description.py +++ b/examples/role_description/role_playing_with_role_description.py @@ -31,7 +31,7 @@ def main(model_type_for_role_generation=None, model_type=None) -> None: model=model_type_for_role_generation, model_config=model_config_description) - role_description_dict = (role_description_agent.run_role_with_description( + role_description_dict = (role_description_agent.run( task_prompt=task_prompt, num_roles=2)) ai_assistant_role = list( @@ -41,16 +41,17 @@ def main(model_type_for_role_generation=None, model_type=None) -> None: ai_user_description = role_description_dict[ai_user_role] sys_msg_meta_dicts = [ - dict(assistant_description=ai_assistant_description, + dict(assistant_role=ai_assistant_role, user_role=ai_user_role, + assistant_description=ai_assistant_description, user_description=ai_user_description) for _ in range(2) ] role_play_session = RolePlaying( - model_type=model_type, assistant_role_name=ai_assistant_role, user_role_name=ai_user_role, task_prompt=task_prompt, - task_type=TaskType.ROLE_DESCRIPTION, # important for role description + model_type=model_type, + task_type=TaskType.ROLE_DESCRIPTION, # Important for role description with_task_specify=True, task_specify_agent_kwargs=dict(model=model_type), extend_sys_msg_meta_dicts=sys_msg_meta_dicts, diff --git a/test/agents/test_role_assignment_agent.py b/test/agents/test_role_assignment_agent.py index 53ad2c367..310f8a03c 100644 --- a/test/agents/test_role_assignment_agent.py +++ b/test/agents/test_role_assignment_agent.py @@ -42,8 +42,7 @@ def test_role_assignment_agent(mock_step, model_type, num_roles): model=model_type, model_config=model_config_description) # Generate the role description dictionary based on the mock step function - role_description_dict = role_description_agent.run_role_with_description( - task_prompt, num_roles) + role_description_dict = role_description_agent.run(task_prompt, num_roles) expected_dict = generate_expected_dict(num_roles) From 
6eb80075d17de2e356ea407ce99bd4baa06324fe Mon Sep 17 00:00:00 2001 From: lig Date: Sat, 9 Sep 2023 00:39:14 +0300 Subject: [PATCH 32/33] Improve test --- test/agents/test_role_assignment_agent.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/agents/test_role_assignment_agent.py b/test/agents/test_role_assignment_agent.py index 310f8a03c..ff525712d 100644 --- a/test/agents/test_role_assignment_agent.py +++ b/test/agents/test_role_assignment_agent.py @@ -51,6 +51,7 @@ def test_role_assignment_agent(mock_step, model_type, num_roles): # Generate mock content according to the number of roles def generate_mock_content(num_roles): + assert num_roles <= 3 roles_with_descriptions = [ ("Trading Strategist", "Design trading strategies. End."), ("Data Scientist", "Analyze market data. End."), @@ -70,6 +71,7 @@ def generate_mock_content(num_roles): # Generate expected dictionary according to the number of roles def generate_expected_dict(num_roles): + assert num_roles <= 3 roles_with_descriptions = { "Trading Strategist": "Design trading strategies.", "Data Scientist": "Analyze market data.", From 108366b3caaa71090c434d8cda2e47968a0616b0 Mon Sep 17 00:00:00 2001 From: lig Date: Sat, 9 Sep 2023 00:47:33 +0300 Subject: [PATCH 33/33] Improve init and import --- camel/agents/__init__.py | 2 ++ camel/agents/role_assignment_agent.py | 2 +- examples/role_description/role_generation.py | 2 +- .../role_description/role_playing_with_role_description.py | 2 +- test/agents/test_role_assignment_agent.py | 3 +-- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/camel/agents/__init__.py b/camel/agents/__init__.py index a857c7fae..6f3e6a4a5 100644 --- a/camel/agents/__init__.py +++ b/camel/agents/__init__.py @@ -23,6 +23,7 @@ from .tool_agents.base import BaseToolAgent from .tool_agents.hugging_face_tool_agent import HuggingFaceToolAgent from .embodied_agent import EmbodiedAgent +from .role_assignment_agent import RoleAssignmentAgent __all__ = [ 'BaseAgent', @@ -36,4 +37,5 @@ 'BaseToolAgent', 'HuggingFaceToolAgent', 'EmbodiedAgent', + 'RoleAssignmentAgent', ] diff --git a/camel/agents/role_assignment_agent.py b/camel/agents/role_assignment_agent.py index 45c7ce0f2..490c3ee02 100644 --- a/camel/agents/role_assignment_agent.py +++ b/camel/agents/role_assignment_agent.py @@ -87,7 +87,7 @@ def run( role_assignment_generation_msg = BaseMessage.make_user_message( role_name="Role Assigner", content=role_assignment_generation) - response = super().step(input_message=role_assignment_generation_msg) + response = self.step(input_message=role_assignment_generation_msg) msg = response.msg # type: BaseMessage terminated = response.terminated diff --git a/examples/role_description/role_generation.py b/examples/role_description/role_generation.py index 94a65b230..7353b1af7 100644 --- a/examples/role_description/role_generation.py +++ b/examples/role_description/role_generation.py @@ -13,7 +13,7 @@ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. 
=========== from colorama import Fore -from camel.agents.role_assignment_agent import RoleAssignmentAgent +from camel.agents import RoleAssignmentAgent from camel.configs import ChatGPTConfig diff --git a/examples/role_description/role_playing_with_role_description.py b/examples/role_description/role_playing_with_role_description.py index a11b7530d..d6ba87436 100644 --- a/examples/role_description/role_playing_with_role_description.py +++ b/examples/role_description/role_playing_with_role_description.py @@ -13,7 +13,7 @@ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== from colorama import Fore -from camel.agents.role_assignment_agent import RoleAssignmentAgent +from camel.agents import RoleAssignmentAgent from camel.configs import ChatGPTConfig from camel.societies import RolePlaying from camel.typing import TaskType diff --git a/test/agents/test_role_assignment_agent.py b/test/agents/test_role_assignment_agent.py index ff525712d..e768d8cbe 100644 --- a/test/agents/test_role_assignment_agent.py +++ b/test/agents/test_role_assignment_agent.py @@ -14,8 +14,7 @@ import pytest from mock import patch -from camel.agents.chat_agent import ChatAgent, ChatAgentResponse -from camel.agents.role_assignment_agent import RoleAssignmentAgent +from camel.agents import ChatAgent, ChatAgentResponse, RoleAssignmentAgent from camel.configs import ChatGPTConfig from camel.messages import BaseMessage from camel.typing import ModelType, RoleType
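
A minimal usage sketch of the API as it stands after the last patch in this series, based on examples/role_description/role_generation.py and the updated tests. RoleAssignmentAgent is importable directly from camel.agents, the entry point is run() (renamed from run_role_with_description), and the example scripts treat its result as a dict mapping each generated role name to its description. The helper name generate_roles and the error message below are illustrative only, and running the sketch requires a configured OpenAI API key.

    from camel.agents import RoleAssignmentAgent
    from camel.configs import ChatGPTConfig
    from camel.typing import ModelType


    def generate_roles(task_prompt: str, num_roles: int = 2) -> dict:
        # RoleAssignmentAgent is re-exported from camel.agents in the final patch.
        agent = RoleAssignmentAgent(model=ModelType.GPT_3_5_TURBO,
                                    model_config=ChatGPTConfig())
        # run() asks the agent to generate `num_roles` domain experts for the
        # task; the examples in this series consume the result as a dict of
        # {role name: role description}.
        role_description_dict = agent.run(task_prompt=task_prompt,
                                          num_roles=num_roles)
        if len(role_description_dict) != num_roles:
            raise ValueError("Got fewer roles than requested.")
        return role_description_dict


    if __name__ == "__main__":
        roles = generate_roles("Develop a trading bot for the stock market.")
        for role_name, description in roles.items():
            print(f"{role_name}: {description}")

For role playing with the generated descriptions, the series also drops the ROLE_DESCRIPTION-specific validation from RolePlaying, so the caller now supplies complete meta dicts (assistant_role, user_role, assistant_description, user_description) through extend_sys_msg_meta_dicts, as examples/role_description/role_playing_with_role_description.py does after the change.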