Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chores: move role_playing_with_functions from utils into example #549

Merged
merged 6 commits into from
May 27, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions camel/utils/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
get_system_information,
get_task_list,
print_text_animated,
role_playing_with_function,
to_pascal,
)
from .token_counting import (
Expand Down Expand Up @@ -52,5 +51,4 @@
'OpenSourceTokenCounter',
'dependencies_required',
'api_keys_required',
'role_playing_with_function',
]
170 changes: 0 additions & 170 deletions camel/utils/commons.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,34 +30,6 @@
F = TypeVar('F', bound=Callable[..., Any])


# Set lazy import
def get_lazy_imported_functions_module():
    """Lazily import and aggregate the default toolkits.

    The import happens inside the function body (not at module top level)
    so that ``camel.utils`` does not eagerly pull in ``camel.functions``.

    Returns:
        list: All default tool functions, in the order: math, search,
            weather, map, twitter, openapi.
    """
    from camel.functions import (
        MAP_FUNCS,
        MATH_FUNCS,
        OPENAPI_FUNCS,
        SEARCH_FUNCS,
        TWITTER_FUNCS,
        WEATHER_FUNCS,
    )

    aggregated = []
    for func_group in (
        MATH_FUNCS,
        SEARCH_FUNCS,
        WEATHER_FUNCS,
        MAP_FUNCS,
        TWITTER_FUNCS,
        OPENAPI_FUNCS,
    ):
        aggregated.extend(func_group)
    return aggregated


# Set lazy import
def get_lazy_imported_types_module():
    """Lazily import ``camel.types`` and return the default model type.

    Returns:
        ModelType: The default chat model, ``ModelType.GPT_3_5_TURBO``.
    """
    from camel.types import ModelType

    default_model_type = ModelType.GPT_3_5_TURBO
    return default_model_type


def api_key_required(func: F) -> F:
r"""Decorator that checks if the API key is available either as an environment variable or passed directly.

Expand Down Expand Up @@ -354,145 +326,3 @@ def to_pascal(snake: str) -> str:


PYDANTIC_V2 = pydantic.VERSION.startswith("2.")


def role_playing_with_function(
    task_prompt: str = (
        "Assume now is 2024 in the Gregorian calendar, "
        "estimate the current age of University of Oxford "
        "and then add 10 more years to this age, "
        "and get the current weather of the city where "
        "the University is located. And tell me what time "
        "zone University of Oxford is in. And use my twitter "
        "account information to create a tweet. Search basketball "
        "course from coursera And help me to choose a basketball by klarna."
    ),
    tools: Optional[List] = None,
    model_type=None,
    chat_turn_limit: int = 10,
    assistant_role_name: str = "Searcher",
    user_role_name: str = "Professor",
) -> None:
    r"""Initializes and conducts a `RolePlaying` with `ChatGPTConfig`
    session. The function creates an interactive and dynamic role-play session
    where the AI Assistant and User engage based on the given task, roles, and
    available functions. It demonstrates the versatility of AI in handling
    diverse tasks and user interactions within a structured `RolePlaying`
    framework.

    Args:
        task_prompt (str): The initial task or scenario description to start
            the `RolePlaying` session. Defaults to a prompt involving the
            estimation of the University of Oxford's age, its local weather
            and time zone, posting a tweet, and shopping queries.
        tools (list): A list of functions that the agent can utilize
            during the session. Defaults to the combined math, search,
            weather, map, twitter and openapi toolkits.
        model_type (ModelType): The type of chatbot model used for both the
            assistant and the user. Defaults to `ModelType.GPT_3_5_TURBO`.
        chat_turn_limit (int): The maximum number of turns (exchanges) in the
            chat session. Defaults to 10.
        assistant_role_name (str): The role name assigned to the AI Assistant.
            Defaults to 'Searcher'.
        user_role_name (str): The role name assigned to the User. Defaults to
            'Professor'.

    Returns:
        None: This function does not return any value but prints out the
        session's dialogues and outputs.
    """

    # Run lazy import: defaults are resolved here rather than in the
    # signature to avoid importing camel.functions/camel.types at module
    # load time (and to avoid mutable default arguments).
    if tools is None:
        tools = get_lazy_imported_functions_module()
    if model_type is None:
        model_type = get_lazy_imported_types_module()

    from colorama import Fore

    from camel.agents.chat_agent import FunctionCallingRecord
    from camel.configs import ChatGPTConfig
    from camel.societies import RolePlaying

    user_model_config = ChatGPTConfig(temperature=0.0)

    # The assistant gets the tool schemas in its model config so the LLM
    # can emit function calls.
    assistant_model_config = ChatGPTConfig(
        tools=tools,
        temperature=0.0,
    )

    role_play_session = RolePlaying(
        assistant_role_name=assistant_role_name,
        user_role_name=user_role_name,
        assistant_agent_kwargs=dict(
            model_type=model_type,
            model_config=assistant_model_config,
            tools=tools,
        ),
        user_agent_kwargs=dict(
            model_type=model_type,
            model_config=user_model_config,
        ),
        task_prompt=task_prompt,
        with_task_specify=False,
    )

    print(
        Fore.GREEN
        + f"AI Assistant sys message:\n{role_play_session.assistant_sys_msg}\n"
    )
    print(
        Fore.BLUE + f"AI User sys message:\n{role_play_session.user_sys_msg}\n"
    )

    print(Fore.YELLOW + f"Original task prompt:\n{task_prompt}\n")
    print(
        Fore.CYAN
        + f"Specified task prompt:\n{role_play_session.specified_task_prompt}\n"
    )
    print(Fore.RED + f"Final task prompt:\n{role_play_session.task_prompt}\n")

    n = 0
    input_msg = role_play_session.init_chat()
    while n < chat_turn_limit:
        n += 1
        assistant_response, user_response = role_play_session.step(input_msg)

        if assistant_response.terminated:
            print(
                Fore.GREEN
                + (
                    "AI Assistant terminated. Reason: "
                    f"{assistant_response.info['termination_reasons']}."
                )
            )
            break
        if user_response.terminated:
            print(
                Fore.GREEN
                + (
                    "AI User terminated. "
                    f"Reason: {user_response.info['termination_reasons']}."
                )
            )
            break

        # Print output from the user
        print_text_animated(
            Fore.BLUE + f"AI User:\n\n{user_response.msg.content}\n"
        )

        # Print output from the assistant, including any function
        # execution information
        print_text_animated(Fore.GREEN + "AI Assistant:")
        tool_calls: List[FunctionCallingRecord] = assistant_response.info[
            'tool_calls'
        ]
        for func_record in tool_calls:
            print_text_animated(f"{func_record}")
        print_text_animated(f"{assistant_response.msg.content}\n")

        # The user signals task completion with this sentinel token.
        if "CAMEL_TASK_DONE" in user_response.msg.content:
            break

        input_msg = assistant_response.msg
10 changes: 5 additions & 5 deletions examples/function_call/github_examples.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from colorama import Fore

from camel.agents import ChatAgent
from camel.configs import FunctionCallingConfig
from camel.configs import ChatGPTConfig
from camel.messages import BaseMessage
from camel.toolkits import GithubToolkit
from camel.utils import print_text_animated
Expand Down Expand Up @@ -46,15 +46,15 @@ def solve_issue(
content="""You are an experienced software engineer who
specializes on data structures and algorithms tasks.""",
)
assistant_model_config = FunctionCallingConfig.from_openai_function_list(
function_list=toolkit.get_tools(),
kwargs=dict(temperature=0.0),
assistant_model_config = ChatGPTConfig(
tools=toolkit.get_tools(),
temperature=0.0,
)
agent = ChatAgent(
assistant_sys_msg,
model_type=model,
model_config=assistant_model_config,
function_list=toolkit.get_tools(),
tools=toolkit.get_tools(),
)
agent.reset()
Copy link
Member

@Appointat Appointat May 27, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The agent is initialized, so is agent.reset() necessary?


Expand Down
26 changes: 0 additions & 26 deletions examples/function_call/role_playing_with_function.py

This file was deleted.

134 changes: 134 additions & 0 deletions examples/function_call/role_playing_with_functions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

from typing import List

from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.configs import ChatGPTConfig
from camel.functions import (
MAP_FUNCS,
MATH_FUNCS,
SEARCH_FUNCS,
TWITTER_FUNCS,
WEATHER_FUNCS,
)
from camel.societies import RolePlaying
from camel.types import ModelType
from camel.utils import print_text_animated


def main(model_type=ModelType.GPT_3_5_TURBO, chat_turn_limit=10) -> None:
    """Run a function-calling ``RolePlaying`` demo session.

    A Searcher assistant and a Professor user collaborate on a task about
    the University of Oxford, with the assistant able to call math, search,
    weather, map and twitter tools. Dialogue is printed to stdout.

    Args:
        model_type: Chat model used by both agents. Defaults to
            ``ModelType.GPT_3_5_TURBO``.
        chat_turn_limit: Maximum number of assistant/user exchanges.
            Defaults to 10.
    """
    task_prompt = (
        "Assume now is 2024 in the Gregorian calendar, "
        "estimate the current age of University of Oxford "
        "and then add 10 more years to this age, "
        "and get the current weather of the city where "
        "the University is located."
    )

    # Aggregate the tools the assistant is allowed to call.
    toolset = [
        *MATH_FUNCS,
        *SEARCH_FUNCS,
        *WEATHER_FUNCS,
        *MAP_FUNCS,
        *TWITTER_FUNCS,
    ]

    # Only the assistant receives the tool schemas; the user agent is a
    # plain chat model.
    assistant_cfg = ChatGPTConfig(
        tools=toolset,
        temperature=0.0,
    )
    user_cfg = ChatGPTConfig(temperature=0.0)

    session = RolePlaying(
        assistant_role_name="Searcher",
        user_role_name="Professor",
        assistant_agent_kwargs=dict(
            model_type=model_type,
            model_config=assistant_cfg,
            tools=toolset,
        ),
        user_agent_kwargs=dict(
            model_type=model_type,
            model_config=user_cfg,
        ),
        task_prompt=task_prompt,
        with_task_specify=False,
    )

    print(
        Fore.GREEN
        + f"AI Assistant sys message:\n{session.assistant_sys_msg}\n"
    )
    print(
        Fore.BLUE + f"AI User sys message:\n{session.user_sys_msg}\n"
    )

    print(Fore.YELLOW + f"Original task prompt:\n{task_prompt}\n")
    print(
        Fore.CYAN
        + f"Specified task prompt:\n{session.specified_task_prompt}\n"
    )
    print(Fore.RED + f"Final task prompt:\n{session.task_prompt}\n")

    input_msg = session.init_chat()
    for _ in range(chat_turn_limit):
        assistant_response, user_response = session.step(input_msg)

        if assistant_response.terminated:
            print(
                Fore.GREEN
                + (
                    "AI Assistant terminated. Reason: "
                    f"{assistant_response.info['termination_reasons']}."
                )
            )
            break
        if user_response.terminated:
            print(
                Fore.GREEN
                + (
                    "AI User terminated. "
                    f"Reason: {user_response.info['termination_reasons']}."
                )
            )
            break

        # Show the user's turn first.
        print_text_animated(
            Fore.BLUE + f"AI User:\n\n{user_response.msg.content}\n"
        )

        # Then the assistant's turn, including any recorded tool calls.
        print_text_animated(Fore.GREEN + "AI Assistant:")
        call_records: List[FunctionCallingRecord] = assistant_response.info[
            'tool_calls'
        ]
        for record in call_records:
            print_text_animated(f"{record}")
        print_text_animated(f"{assistant_response.msg.content}\n")

        # Sentinel emitted by the user agent when the task is finished.
        if "CAMEL_TASK_DONE" in user_response.msg.content:
            break

        input_msg = assistant_response.msg


# Run the example only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
Loading
Loading