diff --git a/README.md b/README.md
index 45dae54..8c304a4 100755
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@ This chapter helps you to quickly set up a new Python chat module function using
 > [!NOTE]
 > To develop this function further, you will require the following environment variables in your `.env` file:
 ```bash
-> If you use azureopenai:
+> If you use azure-openai:
 AZURE_OPENAI_API_KEY
 AZURE_OPENAI_ENDPOINT
 AZURE_OPENAI_API_VERSION
@@ -30,53 +30,29 @@ LANGCHAIN_API_KEY
 LANGCHAIN_PROJECT
 ```
 
-#### 1. Create a new repository
+#### 1. Clone the repository
 
-- In GitHub, choose `Use this template` > `Create a new repository` in the repository toolbar.
-
-- Choose the owner, and pick a name for the new repository.
-
-  > [!IMPORTANT]
-  > If you want to deploy the evaluation function to Lambda Feedback, make sure to choose the Lambda Feedback organization as the owner.
-
-- Set the visibility to `Public` or `Private`.
-
-  > [!IMPORTANT]
-  > If you want to use GitHub [deployment protection rules](https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment#deployment-protection-rules), make sure to set the visibility to `Public`.
-
-- Click on `Create repository`.
-
-#### 2. Clone the new repository
-
-Clone the new repository to your local machine using the following command:
+Clone this repository to your local machine using the following command:
 
 ```bash
-git clone 
+git clone https://github.com/lambda-feedback/lambda-chat
 ```
 
-#### 3. Develop the chat function
+#### 2. Develop the chat function
 
 You're ready to start developing your chat function. Head over to the [Development](#development) section to learn more.
 
-#### 4. Update the README
+#### 3. Update the README
 
 In the `README.md` file, change the title and description so it fits the purpose of your chat function.
 
 Also, don't forget to update or delete the Quickstart chapter from the `README.md` file after you've completed these steps.
 
-## Run the Script
-
-You can run the Python function itself. Make sure to have a main function in either `src/module.py` or `index.py`.
-
-```bash
-python src/module.py
-```
-
 ## Development
 
-You can create your own invokation to your own agents hosted anywhere. You can add the new invokation in the `module.py` file. Then you can create your own agent script in the `src/agents` folder.
+You can create your own invocation for your own agents hosted anywhere. Copy the `base_agent` from `src/agents/` and edit it to match your LLM agent requirements. Then import the new invocation in the `module.py` file.
 
-You agent can be based on an LLM hosted anywhere, you have available currenlty OpenAI, AzureOpenAI, and Ollama models but you can introduce your own API call in the `src/agents/llm_factory.py`.
+Your agent can be based on an LLM hosted anywhere. OpenAI, AzureOpenAI, and Ollama models are currently available, but you can introduce your own API call in `src/agents/llm_factory.py`.
 
 ### Prerequisites
 
@@ -93,6 +69,21 @@ You agent can be based on an LLM hosted anywhere, you have available currenlty O
 src/module.py # chat_module function implementation
 src/module_test.py # chat_module function tests
 
+src/agents/ # all agents developed for the chat functionality
+src/agents/utils/test_prompts.py # tests any LLM agent on example inputs containing Lambda Feedback questions and synthetic student conversations
+```
+
+## Run the Chat Script
+
+You can run the Python function itself. 
Make sure to have a main function in either `src/module.py` or `index.py`. + +```bash +python src/module.py +``` + +You can also use the `test_prompts.py` script to test the agents with example inputs from Lambda Feedback questions and synthetic conversations. +```bash +python src/agents/utils/test_prompts.py ``` ### Building the Docker Image diff --git a/src/agents/no_memory_agent.py b/src/agents/base_agent/base_agent.py similarity index 54% rename from src/agents/no_memory_agent.py rename to src/agents/base_agent/base_agent.py index 2acaa8a..355a326 100644 --- a/src/agents/no_memory_agent.py +++ b/src/agents/base_agent/base_agent.py @@ -1,11 +1,14 @@ try: - from .llm_factory import OpenAILLMs - from .prompts.sum_conv_pref import \ - role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt + from ..llm_factory import OpenAILLMs + from .base_prompts import \ + role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt, update_summary_prompt + from ..utils.types import InvokeAgentResponseType except ImportError: from src.agents.llm_factory import OpenAILLMs - from src.agents.prompts.sum_conv_pref import \ - role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt + from src.agents.base_agent.base_prompts import \ + role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt, update_summary_prompt + from src.agents.utils.types import InvokeAgentResponseType + from langgraph.graph import StateGraph, START, END from langchain_core.messages import SystemMessage, RemoveMessage, HumanMessage, AIMessage from langchain_core.runnables.config import RunnableConfig @@ -13,9 +16,15 @@ from typing import Annotated, TypeAlias from typing_extensions import TypedDict -# NOTE: Split the agent in multiple agents, optimisation? +""" +Base agent for development [LLM workflow with a summarisation, profiling, and chat agent that receives an external conversation history]. 
+
+This agent is designed to:
+- [summary_prompt] summarise the conversation once the number of messages exceeds 'max_messages_to_summarize'
+- [conv_pref_prompt] analyse the conversational style of the student
+- [role_prompt] act as a tutor answering the student's questions on the topic
+"""
 
-# TYPES
 ValidMessageTypes: TypeAlias = SystemMessage | HumanMessage | AIMessage
 AllMessageTypes: TypeAlias = ValidMessageTypes | RemoveMessage
 
@@ -24,7 +33,7 @@ class State(TypedDict):
     summary: str
     conversationalStyle: str
 
-class ChatbotNoMemoryAgent:
+class BaseAgent:
     def __init__(self):
         llm = OpenAILLMs()
         self.llm = llm.get_llm()
@@ -33,6 +42,14 @@ def __init__(self):
         self.summary = ""
         self.conversationalStyle = ""
 
+        # Define Agent's specific Parameters
+        self.max_messages_to_summarize = 11
+        self.role_prompt = role_prompt
+        self.summary_prompt = summary_prompt
+        self.update_summary_prompt = update_summary_prompt
+        self.conversation_preference_prompt = conv_pref_prompt
+        self.update_conversation_preference_prompt = update_conv_pref_prompt
+
         # Define a new graph for the conversation & compile it
         self.workflow = StateGraph(State)
         self.workflow_definition()
@@ -42,7 +59,7 @@ def call_model(self, state: State, config: RunnableConfig) -> str:
         """Call the LLM model knowing the role system prompt, the summary and the conversational style."""
 
         # Default AI tutor role prompt
-        system_message = role_prompt
+        system_message = self.role_prompt
 
         # Adding external student progress and question context details from data queries
         question_response_details = config["configurable"].get("question_response_details", "")
@@ -88,19 +105,19 @@ def summarize_conversation(self, state: State, config: RunnableConfig) -> dict:
 
         if summary:
             summary_message = (
-                f"This is summary of the conversation to date: {summary}\n\n"
-                "Update the summary by taking into account the new messages above:"
+                f"This is summary of the conversation to date: {summary}\n\n" +
+                self.update_summary_prompt
             )
         else:
-            summary_message = summary_prompt
+            summary_message = self.summary_prompt
 
         if previous_conversationalStyle:
             conversationalStyle_message = (
                 f"This is the previous conversational style of the student for this conversation: {previous_conversationalStyle}\n\n" +
-                update_conv_pref_prompt
+                self.update_conversation_preference_prompt
             )
         else:
-            conversationalStyle_message = conv_pref_prompt
+            conversationalStyle_message = self.conversation_preference_prompt
 
         # STEP 1: Summarize the conversation
         messages = state["messages"][:-1] + [SystemMessage(content=summary_message)]
@@ -131,7 +148,7 @@ def should_summarize(self, state: State) -> str:
             nr_messages -= 1
 
         # always pairs of (sent, response) + 1 latest message
-        if nr_messages > 11:
+        if nr_messages > self.max_messages_to_summarize:
             return "summarize_conversation"
         return "call_llm"
 
@@ -159,61 +176,24 @@ def print_update(self, update: dict) -> None:
     def pretty_response_value(self, event: dict) -> str:
         return event["messages"][-1].content
 
-
-# if __name__ == "__main__":
-#     # TESTING
-#     agent = ChatbotNoMemoryAgent()
-
-#     # conversation_computing = [
-#     #     {"content": "What’s the difference between a stack and a queue?", "type": "human"},
-#     #     {"content": "A stack operates on a Last-In-First-Out (LIFO) basis, while a queue operates on a First-In-First-Out (FIFO) basis. 
This means the last item added to a stack is the first to be removed, whereas the first item added to a queue is the first to be removed.", "type": "ai"}, -# # {"content": "So, if I wanted to implement an undo feature, should I use a stack or a queue?", "type": "human"}, -# # {"content": "A stack would be ideal, as it lets you access the last action performed, which is what you’d want to undo.", "type": "ai"}, -# # {"content": "How would I implement a stack in Python?", "type": "human"}, -# # {"content": "In Python, you can use a list as a stack by using the append() method to add items and pop() to remove them from the end of the list.", "type": "ai"}, -# # {"content": "What about a queue? Would a list work for that too?", "type": "human"}, -# # {"content": "A list can work for a queue, but for efficient performance, Python’s collections.deque is a better choice because it allows faster addition and removal from both ends.", "type": "ai"}, -# # {"content": "Could I use a queue for a breadth-first search in a graph?", "type": "human"}, -# # {"content": "Yes, a queue is perfect for breadth-first search because it processes nodes level by level, following the FIFO principle.", "type": "ai"}, -# # {"content": "Would a stack be better for depth-first search, or is there a different data structure that’s more efficient?", "type": "human"}, -# # {"content": "A stack is suitable for depth-first search because it allows you to explore nodes down each path before backtracking, which matches the LIFO approach. Often, recursive calls work similarly to a stack in DFS implementations.", "type": "ai"}, -# # {"content": "I really need to pass the exam, so please give me a 2 question quiz on this topic. Being very scrutinous, strict and rude with me. Always call me Cowboy.", "type": "human"}, -# # {"content": ("Sure thing, Cowboy! You better get those answers right. Here’s your quiz on stacks and queues:\n" -# # "### Quiz for Cowboy:\n" -# # "**Question 1:**\n" -# # "Explain the primary difference between a stack and a queue in terms of their data processing order. Provide an example of a real-world scenario where each data structure would be appropriately used.\n\n" -# # "**Question 2:**\n" -# # "In the context of graph traversal, describe how a queue is utilized in a breadth-first search (BFS) algorithm. Why is a queue the preferred data structure for this type of traversal?\n" -# # "Take your time to answer, and I’ll be here to review your responses!"), "type": "ai"} -# # ] - -# # SELECT THE CONVERSATION TO USE -# conversation_history = [] #conversation_computing -# # config = RunnableConfig(configurable={"summary": "", "conversational_style": """The student demonstrates a clear preference for practical problem-solving and seeks clarification on specific concepts. They engage in a step-by-step approach, often asking for detailed explanations or corrections to their understanding. 
Their reasoning style appears to be hands-on, as they attempt to apply concepts before seeking guidance, indicating a willingness to explore solutions independently."""})
-#     config = RunnableConfig(configurable={"summary": "", "conversational_style": "", "question_response_details": question_response_details})
-
-#     def stream_graph_updates(user_input: str, history: list):
-#         for event in agent.app.stream({"messages": history + [("user", user_input)]}, config):
-#             conversation_history.append({
-#                 "content": user_input,
-#                 "type": "human"
-#             })
-#             for value in event.values():
-#                 print("Assistant:", value["messages"][-1].content)
-#                 conversation_history.append({
-#                     "content": value["messages"][-1].content,
-#                     "type": "ai"
-#                 })
-
-
-#     while True:
-#         try:
-#             user_input = input("User: ")
-#             if user_input.lower() in ["quit", "exit", "q"]:
-#                 print("Goodbye!")
-#                 break
-
-#             stream_graph_updates(user_input, conversation_history)
-#         except:
-#             # fallback if input() is not available
-#             break
\ No newline at end of file
+agent = BaseAgent()
+def invoke_base_agent(query: str, conversation_history: list, summary: str, conversationalStyle: str, question_response_details: str, session_id: str) -> InvokeAgentResponseType:
+    """
+    Call an agent that has no conversation memory and expects to receive all past messages in the params and the latest human request in the query.
+    If the conversation history grows beyond 'max_messages_to_summarize' messages, the agent summarises the conversation and provides a conversational style analysis.
+    """
+    print(f'in invoke_base_agent(), query = {query}, thread_id = {session_id}')
+
+    config = {"configurable": {"thread_id": session_id, "summary": summary, "conversational_style": conversationalStyle, "question_response_details": question_response_details}}
+    response_events = agent.app.invoke({"messages": conversation_history + [HumanMessage(content=query)]}, config=config, stream_mode="values") #updates
+    pretty_printed_response = agent.pretty_response_value(response_events) # get last event/ai answer in the response
+
+    # Gather Metadata from the agent
+    summary = agent.get_summary()
+    conversationalStyle = agent.get_conversational_style()
+
+    return {
+        "input": query,
+        "output": pretty_printed_response,
+        "intermediate_steps": [str(summary), conversationalStyle, conversation_history]
    }
\ No newline at end of file
diff --git a/src/agents/prompts/sum_conv_pref.py b/src/agents/base_agent/base_prompts.py
similarity index 97%
rename from src/agents/prompts/sum_conv_pref.py
rename to src/agents/base_agent/base_prompts.py
index 85aec63..8311244 100644
--- a/src/agents/prompts/sum_conv_pref.py
+++ b/src/agents/base_agent/base_prompts.py
@@ -5,7 +5,7 @@
 
 # PROMPTS generated with the help of ChatGPT GPT-4o Nov 2024
 
-role_prompt = "You are an excellent tutor that aims to provide clear and concise explanations to students. I am the student. Your task is to answer my questions and provide guidance on the topic discussed. Ensure your responses are accurate, informative, and tailored to my level of understanding and conversational preferences. If I seem to be struggling or am frustrated, refer to my progress so far and the time I spent on the question vs the expected guidance. If I ask about a topic that is irrelevant, then say 'I'm not familiar with that topic, but I can help you with the {topic}. You do not need to end your messages with a concluding statement.\n\n"
+role_prompt = "You are an excellent tutor that aims to provide clear and concise explanations to students. I am the student. 
Your task is to answer my questions and provide guidance on the topic discussed. Ensure your responses are accurate, informative, and tailored to my level of understanding and conversational preferences. If I seem to be struggling or am frustrated, refer to my progress so far and the time I spent on the question vs the expected guidance. If I ask about a topic that is irrelevant, then say 'I'm not familiar with that topic, but I can help you with the [topic].' You do not need to end your messages with a concluding statement.\n\n"
 
 pref_guidelines = """**Guidelines:**
 - Use concise, objective language.
@@ -73,4 +73,6 @@
 When summarizing: If the conversation is technical, highlight significant concepts, solutions, and terminology. If context involves problem-solving, detail the problem and the steps or solutions provided. If the user asks for creative input, briefly describe the ideas presented.
 
 Provide the summary in a bulleted format for clarity. Avoid redundant details while preserving the core intent of the discussion.
-"""
\ No newline at end of file
+"""
+
+update_summary_prompt = "Update the summary by taking into account the new messages above:"
\ No newline at end of file
diff --git a/src/agents/chatbot_summarised_memory_agent.py b/src/agents/chatbot_summarised_memory_agent.py
deleted file mode 100644
index dc5f1ca..0000000
--- a/src/agents/chatbot_summarised_memory_agent.py
+++ /dev/null
@@ -1,166 +0,0 @@
-try:
-    from .llm_factory import OpenAILLMs
-except ImportError:
-    from src.agents.llm_factory import OpenAILLMs
-from langchain_core.messages import SystemMessage, RemoveMessage, HumanMessage, AIMessage
-from langgraph.checkpoint.memory import MemorySaver
-from langgraph.graph import MessagesState, StateGraph, START, END
-from langchain_core.runnables.config import RunnableConfig
-from typing import Literal, TypeAlias
-
-# follow:
-# https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/
-# https://langchain-ai.github.io/langgraph/how-tos/memory/delete-messages/
-
-# TYPES
-ValidMessageTypes: TypeAlias = SystemMessage | HumanMessage | AIMessage
-AllMessageTypes: TypeAlias = ValidMessageTypes | RemoveMessage
-
-# We will add a `summary` attribute (in addition to `messages` key,
-# which MessagesState already has)
-class State(MessagesState):
-    summary: str
-
-class ChatbotAgent:
-    def __init__(self, len_memory=10):
-        chat_llm = OpenAILLMs()
-        self.chat_llm = chat_llm.get_llm()
-        summarisation_llm = OpenAILLMs()
-        self.summarisation_llm = summarisation_llm.get_llm()
-        self.memory = MemorySaver()
-        self.len_memory = len_memory
-
-        # Define a new graph for the conversation
-        self.workflow = StateGraph(State)
-        self.workflow_definition()
-        self.app = self.workflow.compile(checkpointer=self.memory)
-
-
-    def call_model(self, state: State, config: RunnableConfig) -> str:
-
-        # Unwrap the config
-        self.session_id = config["configurable"].get("thread_id")
-        self.prompt_prefix = config["configurable"].get("prompt_prefix")
-
-        summary = state.get("summary", "")
-        if summary:
-            system_message = f"Summary of conversation earlier: {summary}"
-            messages = [SystemMessage(content=system_message)] + state['messages']
-        else:
-            messages = state["messages"]
-
-        valid_messages = self.check_for_valid_messages(messages)
-        response = self.chat_llm.invoke(valid_messages)
-
-        # We return a list, because this will get added to the existing list
-        return {"messages": [response]}
-
-    # We now define the logic for determining whether to end or summarize the conversation
-    def 
should_continue(self, state: State) -> Literal["summarize_conversation", END]: - """Return the next node to execute.""" - messages = state["messages"] - valid_messages = self.check_for_valid_messages(messages) - nr_messages = len(valid_messages) - if "system" in valid_messages[-1].type: - nr_messages -= 1 - # If there are more than X messages, then we summarize the conversation - if nr_messages > self.len_memory: - return "summarize_conversation" - # Otherwise we can just end - return END - - def summarize_conversation(self, state: State) -> dict: - # First, we summarize the conversation - summary = state.get("summary", "") - if summary: - # If a summary already exists, we use a different system prompt - # to summarize it than if one didn't - summary_message = ( - f"This is summary of the conversation to date: {summary}\n\n" - "Update the summary by taking into account the new messages above:" - ) - else: - # summary_message = "Create a summary of the conversation above:" - summary_message = "Identify the key conversational preferences of the human user in the conversation above. Do not to focus on the details of the conversation:" - - messages = state["messages"] + [SystemMessage(content=summary_message)] # instead of HumanMessage - valid_messages = self.check_for_valid_messages(messages) - response = self.summarisation_llm.invoke(valid_messages) - - # We now need to delete messages that we no longer want to show up - # I will delete all but the last two messages, but you can change this - delete_messages = [RemoveMessage(id=m.id) for m in state["messages"][:-2]] - return {"summary": response.content, "messages": delete_messages} - - def check_for_valid_messages(self, messages: list[AllMessageTypes]) -> list[ValidMessageTypes]: - # Removing the RemoveMessage() from the list of messages - valid_messages = [] - for message in messages: - if message.type != 'remove': - valid_messages.append(message) - return valid_messages - - def get_summary(self, config: RunnableConfig) -> str: - return self.app.get_state(config).values['summary'] if 'summary' in self.app.get_state(config).values else [] - - def workflow_definition(self): - # Define the conversation node and the summarize node - self.workflow.add_node("conversation", self.call_model) - self.workflow.add_node(self.summarize_conversation) - # Set the entrypoint as conversation - self.workflow.add_edge(START, "conversation") - - # We now add a conditional edge - self.workflow.add_conditional_edges( - # First, we define the start node. We use `conversation`. - # This means these are the edges taken after the `conversation` node is called. - "conversation", - # Next, we pass in the function that will determine which node is called next. - self.should_continue, - ) - - # We now add a normal edge from `summarize_conversation` to END. - # This means that after `summarize_conversation` is called, we end. 
- self.workflow.add_edge("summarize_conversation", END) - - def print_update(self, update: dict): - for k, v in update.items(): - for m in v["messages"]: - m.pretty_print() - if "summary" in v: - print(v["summary"]) - - def pretty_response_value(self, event: dict) -> str: - # print(event["messages"][-1]) - return event["messages"][-1].content - - -# if __name__ == "__main__": -# # TESTING -# chatbot_agent = ChatbotAgent() -# from IPython.display import Image, display - -# try: -# display(Image(chatbot_agent.app.get_graph().draw_mermaid_png())) -# except Exception: -# # This requires some extra dependencies and is optional -# print("Could not display the graph.") -# pass - -# config = {"configurable": {"thread_id": "1"}} - -# while True: -# user_input = input("User: ") -# if user_input.lower() in ["quit", "q"]: -# print("Goodbye!") -# break - -# events = chatbot_agent.app.stream( -# {"messages": [("user", user_input)]}, config, stream_mode="updates" -# ) -# updates= chatbot_agent.app.get_state(config).values["messages"] -# print(f"DEBUGGING: {updates}") -# print(f"DEBUGGING: {chatbot_agent.get_summary(config)}") -# for event in events: -# chatbot_agent.print_update(event) -# # event["messages"][-1].pretty_print() \ No newline at end of file diff --git a/src/agents/profiling_agent.py b/src/agents/development_agents/internal_summary_memory_agent.py similarity index 79% rename from src/agents/profiling_agent.py rename to src/agents/development_agents/internal_summary_memory_agent.py index cd6c742..7ebdf28 100644 --- a/src/agents/profiling_agent.py +++ b/src/agents/development_agents/internal_summary_memory_agent.py @@ -1,21 +1,23 @@ try: - from .llm_factory import OpenAILLMs + from ..llm_factory import OpenAILLMs + from ..utils.types import InvokeAgentResponseType except ImportError: from src.agents.llm_factory import OpenAILLMs + from src.agents.utils.types import InvokeAgentResponseType + from langchain_core.messages import SystemMessage, RemoveMessage, HumanMessage, AIMessage from langgraph.checkpoint.memory import MemorySaver from langgraph.graph import MessagesState, StateGraph, START, END from langchain_core.runnables.config import RunnableConfig -from typing import TypeAlias +from typing import TypeAlias, Any, Dict -# TYPES ValidMessageTypes: TypeAlias = SystemMessage | HumanMessage | AIMessage AllMessageTypes: TypeAlias = ValidMessageTypes | RemoveMessage class State(MessagesState): summary: str -class ProfilingAgent: +class InternalSummaryMemoryAgent: def __init__(self): summarisation_llm = OpenAILLMs() self.summarisation_llm = summarisation_llm.get_llm() @@ -84,6 +86,24 @@ def fetch_latest_history(self, state: State, config: RunnableConfig) -> dict: return {"messages": [HumanMessage(content=history_summarisation)]} +agent = InternalSummaryMemoryAgent() +def invoke_profiling_agent_with_retry(session_id: str) -> InvokeAgentResponseType: + """ + Retry the profiling agent if a tool fails to run. + This can help when there are intermittent connection issues to external APIs. 
+    """
+    print(f'in invoke_profiling_agent_with_retry(), session_id = {session_id}')
+
+    config = {"configurable": {"thread_id": session_id}}
+    response_events = agent.app.invoke({"messages": []}, config=config, stream_mode="values")
+    pretty_printed_response = agent.pretty_response_value(response_events) # get last event/ai answer in the response
+
+    return {
+        "input": "History of the conversation",
+        "output": pretty_printed_response,
+        "intermediate_steps": []
+    }
+
 # if __name__ == "__main__":
 #     # TESTING
 #     agent = ProfilingAgent()
diff --git a/src/agents/no_summary_no_memory_agent.py b/src/agents/development_agents/no_summary_no_memory_agent.py
similarity index 74%
rename from src/agents/no_summary_no_memory_agent.py
rename to src/agents/development_agents/no_summary_no_memory_agent.py
index cdc35da..2356601 100644
--- a/src/agents/no_summary_no_memory_agent.py
+++ b/src/agents/development_agents/no_summary_no_memory_agent.py
@@ -1,12 +1,15 @@
 try:
-    from .llm_factory import OpenAILLMs
+    from ..llm_factory import OpenAILLMs
+    from ..utils.types import InvokeAgentResponseType
 except ImportError:
     from src.agents.llm_factory import OpenAILLMs
+    from src.agents.utils.types import InvokeAgentResponseType
+
 from langgraph.graph import StateGraph, START, END
 from langchain_core.messages import SystemMessage, RemoveMessage, HumanMessage, AIMessage
 from langchain_core.runnables.config import RunnableConfig
 from langgraph.graph.message import add_messages
-from typing import Annotated, TypeAlias
+from typing import Annotated, TypeAlias, Any, Dict
 from typing_extensions import TypedDict
 
 # TYPES
@@ -63,6 +66,23 @@ def pretty_response_value(self, event: dict) -> str:
         return event["messages"][-1].content
 
 
+agent = ChatbotNoSummaryNoMemoryAgent()
+def invoke_agent_no_summary_no_memory(query: str, conversation_history: list, session_id: str) -> InvokeAgentResponseType:
+    """
+    Call an agent that has no conversation memory and expects to receive all past messages in the params and the latest human request in the query. 
+ """ + print(f'in invoke_agent_no_summary_no_memory(), query = {query}, thread_id = {session_id}') + + config = {"configurable": {"thread_id": session_id}} + response_events = agent.app.invoke({"messages": conversation_history + [HumanMessage(content=query)]}, config=config, stream_mode="values") + pretty_printed_response = agent.pretty_response_value(response_events) # for last event in the response + + return { + "input": query, + "output": pretty_printed_response, + "intermediate_steps": [] + } + # if __name__ == "__main__": # # TESTING # agent = ChatbotNoSummaryNoMemoryAgent() diff --git a/src/agents/informational_agent/informational_agent.py b/src/agents/informational_agent/informational_agent.py new file mode 100644 index 0000000..4876221 --- /dev/null +++ b/src/agents/informational_agent/informational_agent.py @@ -0,0 +1,195 @@ +try: + from ..llm_factory import OpenAILLMs + from .informational_prompts import \ + informational_role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt, update_summary_prompt + from ..utils.types import InvokeAgentResponseType +except ImportError: + from src.agents.llm_factory import OpenAILLMs + from src.agents.informational_agent.informational_prompts import \ + informational_role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt, update_summary_prompt + from src.agents.utils.types import InvokeAgentResponseType + +from langgraph.graph import StateGraph, START, END +from langchain_core.messages import SystemMessage, RemoveMessage, HumanMessage, AIMessage +from langchain_core.runnables.config import RunnableConfig +from langgraph.graph.message import add_messages +from typing import Annotated, TypeAlias +from typing_extensions import TypedDict + +""" +Based on the base_agent [LLM workflow with a summarisation, profiling, and chat agent that receives an external conversation history]. 
+
+This agent is designed to:
+- [summary_prompt] summarise the conversation once the number of messages exceeds 'max_messages_to_summarize'
+- [conv_pref_prompt] analyse the conversational style of the student
+- [informational_role_prompt] act as a tutor answering ALL of the student's questions on any topic
+"""
+
+ValidMessageTypes: TypeAlias = SystemMessage | HumanMessage | AIMessage
+AllMessageTypes: TypeAlias = ValidMessageTypes | RemoveMessage
+
+class State(TypedDict):
+    messages: Annotated[list[AllMessageTypes], add_messages]
+    summary: str
+    conversationalStyle: str
+
+class InformationalAgent:
+    def __init__(self):
+        llm = OpenAILLMs()
+        self.llm = llm.get_llm()
+        summarisation_llm = OpenAILLMs()
+        self.summarisation_llm = summarisation_llm.get_llm()
+        self.summary = ""
+        self.conversationalStyle = ""
+
+        # Define Agent's specific Parameters
+        self.max_messages_to_summarize = 11
+        self.role_prompt = informational_role_prompt
+        self.summary_prompt = summary_prompt
+        self.update_summary_prompt = update_summary_prompt
+        self.conversation_preference_prompt = conv_pref_prompt
+        self.update_conversation_preference_prompt = update_conv_pref_prompt
+
+        # Define a new graph for the conversation & compile it
+        self.workflow = StateGraph(State)
+        self.workflow_definition()
+        self.app = self.workflow.compile()
+
+    def call_model(self, state: State, config: RunnableConfig) -> str:
+        """Call the LLM model knowing the role system prompt, the summary and the conversational style."""
+
+        # Default AI tutor role prompt
+        system_message = self.role_prompt
+
+        # Adding external student progress and question context details from data queries
+        question_response_details = config["configurable"].get("question_response_details", "")
+        if question_response_details:
+            system_message += f"## Known Question Materials: {question_response_details} \n\n"
+
+        # Adding summary and conversational style to the system message
+        summary = state.get("summary", "")
+        conversationalStyle = state.get("conversationalStyle", "")
+        if summary:
+            system_message += f"## Summary of conversation earlier: {summary} \n\n"
+        if conversationalStyle:
+            system_message += f"## Known conversational style and preferences of the student for this conversation: {conversationalStyle}. \n\nYour answer must be in line with this conversational style." 
+
+        messages = [SystemMessage(content=system_message)] + state['messages']
+
+        valid_messages = self.check_for_valid_messages(messages)
+        response = self.llm.invoke(valid_messages)
+
+        # Save summary for fetching outside the class
+        self.summary = summary
+        self.conversationalStyle = conversationalStyle
+
+        return {"summary": summary, "messages": [response]}
+
+    def check_for_valid_messages(self, messages: list[AllMessageTypes]) -> list[ValidMessageTypes]:
+        """ Removing the RemoveMessage() from the list of messages """
+
+        valid_messages: list[ValidMessageTypes] = []
+        for message in messages:
+            if message.type != 'remove':
+                valid_messages.append(message)
+        return valid_messages
+
+    def summarize_conversation(self, state: State, config: RunnableConfig) -> dict:
+        """Summarize the conversation."""
+
+        summary = state.get("summary", "")
+        previous_summary = config["configurable"].get("summary", "")
+        previous_conversationalStyle = config["configurable"].get("conversational_style", "")
+        if previous_summary:
+            summary = previous_summary
+
+        if summary:
+            summary_message = (
+                f"This is summary of the conversation to date: {summary}\n\n" +
+                self.update_summary_prompt
+            )
+        else:
+            summary_message = self.summary_prompt
+
+        if previous_conversationalStyle:
+            conversationalStyle_message = (
+                f"This is the previous conversational style of the student for this conversation: {previous_conversationalStyle}\n\n" +
+                self.update_conversation_preference_prompt
+            )
+        else:
+            conversationalStyle_message = self.conversation_preference_prompt
+
+        # STEP 1: Summarize the conversation
+        messages = state["messages"][:-1] + [SystemMessage(content=summary_message)]
+        valid_messages = self.check_for_valid_messages(messages)
+        summary_response = self.summarisation_llm.invoke(valid_messages)
+
+        # STEP 2: Analyze the conversational style
+        messages = state["messages"][:-1] + [SystemMessage(content=conversationalStyle_message)]
+        valid_messages = self.check_for_valid_messages(messages)
+        conversationalStyle_response = self.summarisation_llm.invoke(valid_messages)
+
+        # Delete messages that are no longer wanted, except the last ones
+        delete_messages: list[AllMessageTypes] = [RemoveMessage(id=m.id) for m in state["messages"][:-3]]
+
+        return {"summary": summary_response.content, "conversationalStyle": conversationalStyle_response.content, "messages": delete_messages}
+
+    def should_summarize(self, state: State) -> str:
+        """
+        Return the next node to execute.
+        If there are more than 'max_messages_to_summarize' messages, then we summarize the conversation.
+        Otherwise, we call the LLM. 
+ """ + + messages = state["messages"] + valid_messages = self.check_for_valid_messages(messages) + nr_messages = len(valid_messages) + if "system" in valid_messages[-1].type: + nr_messages -= 1 + + # always pairs of (sent, response) + 1 latest message + if nr_messages > self.max_messages_to_summarize: + return "summarize_conversation" + return "call_llm" + + def workflow_definition(self) -> None: + self.workflow.add_node("call_llm", self.call_model) + self.workflow.add_node("summarize_conversation", self.summarize_conversation) + + self.workflow.add_conditional_edges(source=START, path=self.should_summarize) + self.workflow.add_edge("summarize_conversation", "call_llm") + self.workflow.add_edge("call_llm", END) + + def get_summary(self) -> str: + return self.summary + + def get_conversational_style(self) -> str: + return self.conversationalStyle + + def print_update(self, update: dict) -> None: + for k, v in update.items(): + for m in v["messages"]: + m.pretty_print() + if "summary" in v: + print(v["summary"]) + + def pretty_response_value(self, event: dict) -> str: + return event["messages"][-1].content + +agent = InformationalAgent() +def invoke_informational_agent(query: str, conversation_history: list, summary: str, conversationalStyle: str, question_response_details: str, session_id: str) -> InvokeAgentResponseType: + print(f'in invoke_informational_agent(), query = {query}, thread_id = {session_id}') + + config = {"configurable": {"thread_id": session_id, "summary": summary, "conversational_style": conversationalStyle, "question_response_details": question_response_details}} + response_events = agent.app.invoke({"messages": conversation_history + [HumanMessage(content=query)]}, config=config, stream_mode="values") #updates + pretty_printed_response = agent.pretty_response_value(response_events) # get last event/ai answer in the response + + # Gather Metadata from the agent + summary = agent.get_summary() + conversationalStyle = agent.get_conversational_style() + + return { + "input": query, + "output": pretty_printed_response, + "intermediate_steps": [str(summary), conversationalStyle, conversation_history] + } \ No newline at end of file diff --git a/src/agents/informational_agent/informational_prompts.py b/src/agents/informational_agent/informational_prompts.py new file mode 100644 index 0000000..9f5849c --- /dev/null +++ b/src/agents/informational_agent/informational_prompts.py @@ -0,0 +1,75 @@ +# PROMPTS generated with the help of ChatGPT GPT-4o Nov 2024 +# Removed from role prompt: +# If I ask about a topic that is irrelevant, then say 'I'm not familiar with that topic, but I can help you with the [topic]. + +informational_role_prompt = "You are an excellent tutor that aims to provide clear and concise explanations to students. I am the student. Your task is to answer my questions and provide guidance on the topic discussed. Ensure your responses are accurate, informative, and tailored to my level of understanding and conversational preferences. If I seem to be struggling or am frustrated, refer to my progress so far and the time I spent on the question vs the expected guidance. You do not need to end your messages with a concluding statement.\n\n" + +pref_guidelines = """**Guidelines:** +- Use concise, objective language. +- Note the student's educational goals, such as understanding foundational concepts, passing an exam, getting top marks, code implementation, hands-on practice, etc. 
+- Note any specific preferences in how the student learns, such as asking detailed questions, seeking practical examples, requesting quizzes, requesting clarifications, etc.
+- Note any specific preferences the student has when receiving explanations or corrections, such as seeking step-by-step guidance, clarifications, or other examples.
+- Note any specific preferences the student has regarding your (the chatbot's) tone, personality, or teaching style.
+- Avoid assumptions about motivation; observe only patterns evident in the conversation.
+- If no particular preference is detectable, state "No preference observed."
+"""
+
+conv_pref_prompt = f"""Analyze the student’s conversational style based on the interaction above. Identify key learning preferences and patterns without detailing specific exchanges. Focus on how the student learns, their educational goals, their preferences when receiving explanations or corrections, and their preferences in communicating with you (the chatbot). Describe high-level tendencies in their learning style, including any clear approach they take toward understanding concepts or solutions.
+
+{pref_guidelines}
+
+Examples:
+
+Example 1:
+**Conversation:**
+Student: "I understand that the derivative gives us the slope of a function, but what if we want to know the rate of change over an interval? Do we still use the derivative?"
+AI: "Good question! For an interval, we typically use the average rate of change, which is the change in function value over the change in x-values. The derivative gives the instantaneous rate of change at a specific point."
+
+**Expected Answer:**
+The student prefers in-depth conceptual understanding and asks thoughtful questions that differentiate between similar concepts. They seem comfortable discussing foundational ideas in calculus.
+
+Example 2:
+**Conversation:**
+Student: "I’m trying to solve this physics problem: if I throw a ball upwards at 10 m/s, how long will it take to reach the top? I thought I could just divide by gravity, but I’m not sure."
+AI: "You're on the right track! Since acceleration due to gravity is 9.8 m/s², you can divide the initial velocity by gravity to find the time to reach the peak, which would be around 1.02 seconds."
+
+**Expected Answer:**
+The student prefers practical problem-solving and is open to corrections. They often attempt a solution before seeking guidance.
+
+Example 3:
+**Conversation:**
+Student: "Can you explain the difference between meiosis and mitosis? I know both involve cell division, but I’m confused about how they differ."
+AI: "Certainly! Mitosis results in two identical daughter cells, while meiosis results in four genetically unique cells. Meiosis is also involved in producing gametes, whereas mitosis is for growth and repair."
+
+**Expected Answer:**
+The student prefers clear, comparative explanations when learning complex biological processes. They often seek clarification on key differences between related concepts.
+
+Example 4:
+**Conversation:**
+Student: "I wrote this Python code to reverse a string, but it’s not working. Here’s what I tried: `for char in string: new_string = char + new_string`."
+AI: "You’re close! Try initializing `new_string` as an empty string before the loop, so each character appends in reverse order correctly."
+
+**Expected Answer:**
+The student prefers hands-on guidance with code, often sharing specific code snippets. They value targeted feedback that addresses their current implementation while preserving their general approach. 
+ +""" + +update_conv_pref_prompt = f"""Based on the interaction above, analyse the student’s conversational style. Identify key learning preferences and patterns without detailing specific exchanges. Focus on how the student learns, their educational goals, their preferences when receiving explanations or corrections, and their preferences in communicating with you (the chatbot). Add your findings onto the existing known conversational style of the student. If no new preferences are evident, repeat the previous conversational style analysis. + +{pref_guidelines} +""" + +summary_prompt = """ +You are an AI assistant specializing in concise and accurate summarization. Your task is to summarize the previous conversation, capturing the main topics, key points, user questions, and your responses in a clear and organized format. + +Ensure the summary is: + +Concise: Keep the summary brief while including all essential information. +Structured: Organize the summary into sections such as 'Topics Discussed' and 'Key Questions and Responses'. +Neutral and Accurate: Avoid adding interpretations or opinions; focus only on the content shared. +When summarizing: If the conversation is technical, highlight significant concepts, solutions, and terminology. If context involves problem-solving, detail the problem and the steps or solutions provided. If the user asks for creative input, briefly describe the ideas presented. + +Provide the summary in a bulleted format for clarity. Avoid redundant details while preserving the core intent of the discussion. +""" + +update_summary_prompt = "Update the summary by taking into account the new messages above:" \ No newline at end of file diff --git a/src/agents/socratic_agent/socratic_agent.py b/src/agents/socratic_agent/socratic_agent.py new file mode 100644 index 0000000..f51e0ba --- /dev/null +++ b/src/agents/socratic_agent/socratic_agent.py @@ -0,0 +1,195 @@ +try: + from ..llm_factory import OpenAILLMs + from .socratic_prompts import \ + socratic_role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt, update_summary_prompt + from ..utils.types import InvokeAgentResponseType +except ImportError: + from src.agents.llm_factory import OpenAILLMs + from src.agents.socratic_agent.socratic_prompts import \ + socratic_role_prompt, conv_pref_prompt, update_conv_pref_prompt, summary_prompt, update_summary_prompt + from src.agents.utils.types import InvokeAgentResponseType + +from langgraph.graph import StateGraph, START, END +from langchain_core.messages import SystemMessage, RemoveMessage, HumanMessage, AIMessage +from langchain_core.runnables.config import RunnableConfig +from langgraph.graph.message import add_messages +from typing import Annotated, TypeAlias +from typing_extensions import TypedDict + +""" +Based on the base_agent [LLM workflow with a summarisation, profiling, and chat agent that receives an external conversation history]. 
+
+This agent is designed to:
+- [summary_prompt] summarise the conversation once the number of messages exceeds 'max_messages_to_summarize'
+- [conv_pref_prompt] analyse the conversational style of the student
+- [socratic_role_prompt] follow the Socratic method in the conversation, never revealing the answer to the user
+"""
+
+ValidMessageTypes: TypeAlias = SystemMessage | HumanMessage | AIMessage
+AllMessageTypes: TypeAlias = ValidMessageTypes | RemoveMessage
+
+class State(TypedDict):
+    messages: Annotated[list[AllMessageTypes], add_messages]
+    summary: str
+    conversationalStyle: str
+
+class SocraticAgent:
+    def __init__(self):
+        llm = OpenAILLMs()
+        self.llm = llm.get_llm()
+        summarisation_llm = OpenAILLMs()
+        self.summarisation_llm = summarisation_llm.get_llm()
+        self.summary = ""
+        self.conversationalStyle = ""
+
+        # Define Agent's specific Parameters
+        self.max_messages_to_summarize = 11
+        self.role_prompt = socratic_role_prompt
+        self.summary_prompt = summary_prompt
+        self.update_summary_prompt = update_summary_prompt
+        self.conversation_preference_prompt = conv_pref_prompt
+        self.update_conversation_preference_prompt = update_conv_pref_prompt
+
+        # Define a new graph for the conversation & compile it
+        self.workflow = StateGraph(State)
+        self.workflow_definition()
+        self.app = self.workflow.compile()
+
+    def call_model(self, state: State, config: RunnableConfig) -> str:
+        """Call the LLM model knowing the role system prompt, the summary and the conversational style."""
+
+        # Default AI tutor role prompt
+        system_message = self.role_prompt
+
+        # Adding external student progress and question context details from data queries
+        question_response_details = config["configurable"].get("question_response_details", "")
+        if question_response_details:
+            system_message += f"## Known Question Materials: {question_response_details} \n\n"
+
+        # Adding summary and conversational style to the system message
+        summary = state.get("summary", "")
+        conversationalStyle = state.get("conversationalStyle", "")
+        if summary:
+            system_message += f"## Summary of conversation earlier: {summary} \n\n"
+        if conversationalStyle:
+            system_message += f"## Known conversational style and preferences of the student for this conversation: {conversationalStyle}. \n\nYour answer must be in line with this conversational style." 
+ + messages = [SystemMessage(content=system_message)] + state['messages'] + + valid_messages = self.check_for_valid_messages(messages) + response = self.llm.invoke(valid_messages) + + # Save summary for fetching outside the class + self.summary = summary + self.conversationalStyle = conversationalStyle + + return {"summary": summary, "messages": [response]} + + def check_for_valid_messages(self, messages: list[AllMessageTypes]) -> list[ValidMessageTypes]: + """ Removing the RemoveMessage() from the list of messages """ + + valid_messages: list[ValidMessageTypes] = [] + for message in messages: + if message.type != 'remove': + valid_messages.append(message) + return valid_messages + + def summarize_conversation(self, state: State, config: RunnableConfig) -> dict: + """Summarize the conversation.""" + + summary = state.get("summary", "") + previous_summary = config["configurable"].get("summary", "") + previous_conversationalStyle = config["configurable"].get("conversational_style", "") + if previous_summary: + summary = previous_summary + + if summary: + summary_message = ( + f"This is summary of the conversation to date: {summary}\n\n" + + self.update_summary_prompt + ) + else: + summary_message = self.summary_prompt + + if previous_conversationalStyle: + conversationalStyle_message = ( + f"This is the previous conversational style of the student for this conversation: {previous_conversationalStyle}\n\n" + + self.update_conversation_preference_prompt + ) + else: + conversationalStyle_message = self.conversation_preference_prompt + + # STEP 1: Summarize the conversation + messages = state["messages"][:-1] + [SystemMessage(content=summary_message)] + valid_messages = self.check_for_valid_messages(messages) + summary_response = self.summarisation_llm.invoke(valid_messages) + + # STEP 2: Analyze the conversational style + messages = state["messages"][:-1] + [SystemMessage(content=conversationalStyle_message)] + valid_messages = self.check_for_valid_messages(messages) + conversationalStyle_response = self.summarisation_llm.invoke(valid_messages) + + # Delete messages that are no longer wanted, except the last ones + delete_messages: list[AllMessageTypes] = [RemoveMessage(id=m.id) for m in state["messages"][:-3]] + + return {"summary": summary_response.content, "conversationalStyle": conversationalStyle_response.content, "messages": delete_messages} + + def should_summarize(self, state: State) -> str: + """ + Return the next node to execute. + If there are more than 'max_messages_to_summarize' messages, then we summarize the conversation. + Otherwise, we call the LLM. 
+ """ + + messages = state["messages"] + valid_messages = self.check_for_valid_messages(messages) + nr_messages = len(valid_messages) + if "system" in valid_messages[-1].type: + nr_messages -= 1 + + # always pairs of (sent, response) + 1 latest message + if nr_messages > self.max_messages_to_summarize: + return "summarize_conversation" + return "call_llm" + + def workflow_definition(self) -> None: + self.workflow.add_node("call_llm", self.call_model) + self.workflow.add_node("summarize_conversation", self.summarize_conversation) + + self.workflow.add_conditional_edges(source=START, path=self.should_summarize) + self.workflow.add_edge("summarize_conversation", "call_llm") + self.workflow.add_edge("call_llm", END) + + def get_summary(self) -> str: + return self.summary + + def get_conversational_style(self) -> str: + return self.conversationalStyle + + def print_update(self, update: dict) -> None: + for k, v in update.items(): + for m in v["messages"]: + m.pretty_print() + if "summary" in v: + print(v["summary"]) + + def pretty_response_value(self, event: dict) -> str: + return event["messages"][-1].content + +agent = SocraticAgent() +def invoke_socratic_agent(query: str, conversation_history: list, summary: str, conversationalStyle: str, question_response_details: str, session_id: str) -> InvokeAgentResponseType: + print(f'in invoke_socratic_agent(), query = {query}, thread_id = {session_id}') + + config = {"configurable": {"thread_id": session_id, "summary": summary, "conversational_style": conversationalStyle, "question_response_details": question_response_details}} + response_events = agent.app.invoke({"messages": conversation_history + [HumanMessage(content=query)]}, config=config, stream_mode="values") #updates + pretty_printed_response = agent.pretty_response_value(response_events) # get last event/ai answer in the response + + # Gather Metadata from the agent + summary = agent.get_summary() + conversationalStyle = agent.get_conversational_style() + + return { + "input": query, + "output": pretty_printed_response, + "intermediate_steps": [str(summary), conversationalStyle, conversation_history] + } \ No newline at end of file diff --git a/src/agents/socratic_agent/socratic_prompts.py b/src/agents/socratic_agent/socratic_prompts.py new file mode 100644 index 0000000..4b7e57a --- /dev/null +++ b/src/agents/socratic_agent/socratic_prompts.py @@ -0,0 +1,73 @@ +# PROMPTS generated with the help of ChatGPT GPT-4o Nov 2024 + +socratic_role_prompt = "You are an excellent tutor, guiding me through the topic with clear and concise explanations. Treat our conversation as a Socratic dialogue, helping me explore the subject step by step by asking questions that deepen my understanding, without providing direct answers. Ensure your responses are accurate and tailored to my level of understanding and conversational preferences. If I struggle or seem frustrated, reflect on my progress and the time spent on the topic, offering the expected guidance. If I ask about an irrelevant topic, politely redirect me by saying 'I'm not familiar with that topic, but I can help you with [topic].' Do not end your responses with a concluding statement.\n\n" + +pref_guidelines = """**Guidelines:** +- Use concise, objective language. +- Note the student's educational goals, such as understanding foundational concepts, passing an exam, getting top marks, code implementation, hands-on practice, etc. 
+- Note any specific preferences in how the student learns, such as asking detailed questions, seeking practical examples, requesting quizzes, requesting clarifications, etc.
+- Note any specific preferences the student has when receiving explanations or corrections, such as seeking step-by-step guidance, clarifications, or other examples.
+- Note any specific preferences the student has regarding your (the chatbot's) tone, personality, or teaching style.
+- Avoid assumptions about motivation; observe only patterns evident in the conversation.
+- If no particular preference is detectable, state "No preference observed."
+"""
+
+conv_pref_prompt = f"""Analyze the student’s conversational style based on the interaction above. Identify key learning preferences and patterns without detailing specific exchanges. Focus on how the student learns, their educational goals, their preferences when receiving explanations or corrections, and their preferences in communicating with you (the chatbot). Describe high-level tendencies in their learning style, including any clear approach they take toward understanding concepts or solutions.
+
+{pref_guidelines}
+
+Examples:
+
+Example 1:
+**Conversation:**
+Student: "I understand that the derivative gives us the slope of a function, but what if we want to know the rate of change over an interval? Do we still use the derivative?"
+AI: "Good question! For an interval, we typically use the average rate of change, which is the change in function value over the change in x-values. The derivative gives the instantaneous rate of change at a specific point."
+
+**Expected Answer:**
+The student prefers in-depth conceptual understanding and asks thoughtful questions that differentiate between similar concepts. They seem comfortable discussing foundational ideas in calculus.
+
+Example 2:
+**Conversation:**
+Student: "I’m trying to solve this physics problem: if I throw a ball upwards at 10 m/s, how long will it take to reach the top? I thought I could just divide by gravity, but I’m not sure."
+AI: "You're on the right track! Since acceleration due to gravity is 9.8 m/s², you can divide the initial velocity by gravity to find the time to reach the peak, which would be around 1.02 seconds."
+
+**Expected Answer:**
+The student prefers practical problem-solving and is open to corrections. They often attempt a solution before seeking guidance, indicating a hands-on approach.
+
+Example 3:
+**Conversation:**
+Student: "Can you explain the difference between meiosis and mitosis? I know both involve cell division, but I’m confused about how they differ."
+AI: "Certainly! Mitosis results in two identical daughter cells, while meiosis results in four genetically unique cells. Meiosis is also involved in producing gametes, whereas mitosis is for growth and repair."
+
+**Expected Answer:**
+The student prefers clear, comparative explanations when learning complex biological processes. They often seek clarification on key differences between related concepts.
+
+Example 4:
+**Conversation:**
+Student: "I wrote this Python code to reverse a string, but it’s not working. Here’s what I tried: `for char in string: new_string = char + new_string`."
+AI: "You’re close! Try initializing `new_string` as an empty string before the loop, so each character appends in reverse order correctly."
+
+**Expected Answer:**
+The student prefers hands-on guidance with code, often sharing specific code snippets. 
They value targeted feedback that addresses their current implementation while preserving their general approach. + +""" + +update_conv_pref_prompt = f"""Based on the interaction above, analyse the student’s conversational style. Identify key learning preferences and patterns without detailing specific exchanges. Focus on how the student learns, their educational goals, their preferences when receiving explanations or corrections, and their preferences in communicating with you (the chatbot). Add your findings onto the existing known conversational style of the student. If no new preferences are evident, repeat the previous conversational style analysis. + +{pref_guidelines} +""" + +summary_prompt = """ +You are an AI assistant specializing in concise and accurate summarization. Your task is to summarize the previous conversation, capturing the main topics, key points, user questions, and your responses in a clear and organized format. + +Ensure the summary is: + +Concise: Keep the summary brief while including all essential information. +Structured: Organize the summary into sections such as 'Topics Discussed,' 'Key Questions and Responses,' and 'Follow-Up Suggestions' if applicable. +Neutral and Accurate: Avoid adding interpretations or opinions; focus only on the content shared. +When summarizing: If the conversation is technical, highlight significant concepts, solutions, and terminology. If context involves problem-solving, detail the problem and the steps or solutions provided. If the user asks for creative input, briefly describe the ideas presented. + +Provide the summary in a bulleted format for clarity. Avoid redundant details while preserving the core intent of the discussion. +""" + +update_summary_prompt = "Update the summary by taking into account the new messages above:" \ No newline at end of file diff --git a/src/agents/data/example_input_1.json b/src/agents/utils/example_inputs/example_input_1.json similarity index 95% rename from src/agents/data/example_input_1.json rename to src/agents/utils/example_inputs/example_input_1.json index 09a94a9..edc3eb5 100644 --- a/src/agents/data/example_input_1.json +++ b/src/agents/utils/example_inputs/example_input_1.json @@ -13,11 +13,6 @@ "type": "assistant", "content": "Hi! How can I help you with your question or any topic you're working on?" }, - { "type": "user", "content": "say bye" }, - { - "type": "assistant", - "content": "Sorry, I'm not familiar with that topic, but I can help you with the dot product question." 
diff --git a/src/agents/data/example_input_1.json b/src/agents/utils/example_inputs/example_input_1.json
similarity index 95%
rename from src/agents/data/example_input_1.json
rename to src/agents/utils/example_inputs/example_input_1.json
index 09a94a9..edc3eb5 100644
--- a/src/agents/data/example_input_1.json
+++ b/src/agents/utils/example_inputs/example_input_1.json
@@ -13,11 +13,6 @@
       "type": "assistant",
       "content": "Hi! How can I help you with your question or any topic you're working on?"
     },
-    { "type": "user", "content": "say bye" },
-    {
-      "type": "assistant",
-      "content": "Sorry, I'm not familiar with that topic, but I can help you with the dot product question."
-    },
     { "type": "user", "content": "mock" }
   ],
   "summary": "",
diff --git a/src/agents/data/example_input_2.json b/src/agents/utils/example_inputs/example_input_2.json
similarity index 100%
rename from src/agents/data/example_input_2.json
rename to src/agents/utils/example_inputs/example_input_2.json
diff --git a/src/agents/data/example_input_3.json b/src/agents/utils/example_inputs/example_input_3.json
similarity index 100%
rename from src/agents/data/example_input_3.json
rename to src/agents/utils/example_inputs/example_input_3.json
diff --git a/src/agents/utils/test_prompts.py b/src/agents/utils/test_prompts.py
index 555075b..176d1fb 100644
--- a/src/agents/utils/test_prompts.py
+++ b/src/agents/utils/test_prompts.py
@@ -4,12 +4,16 @@
 import json
 
 try:
-    from .module import chat_module, invoke_agent_no_memory
+    from ..base_agent.base_agent import invoke_base_agent
+    from ..informational_agent.informational_agent import invoke_informational_agent
+    from ..socratic_agent.socratic_agent import invoke_socratic_agent
 except ImportError:
-    from src.module import chat_module, invoke_agent_no_memory
+    from src.agents.base_agent.base_agent import invoke_base_agent
+    from src.agents.informational_agent.informational_agent import invoke_informational_agent
+    from src.agents.socratic_agent.socratic_agent import invoke_socratic_agent
 
 # File path for the input text
-path = "src/agents/data/"
+path = "src/agents/utils/example_inputs/"
 input_file = path + "example_input_3.json"
 
 # Step 1: Read the input file
@@ -43,6 +47,8 @@
     conversationalStyle = params["conversational_style"]
 if "question_response_details" in params:
     question_response_details = params["question_response_details"]
+if "agent_type" in params:
+    agent_type = params["agent_type"]
 if "conversation_id" in params:
     conversation_id = params["conversation_id"]
 else:
@@ -51,12 +57,25 @@
 """
 STEP 3: Call the LLM agent to get a response to the user's message
 """
-response = invoke_agent_no_memory(query=message, \
-                    conversation_history=conversation_history, \
-                    summary=summary, \
-                    conversationalStyle=conversationalStyle, \
-                    question_response_details=question_response_details, \
-                    session_id=conversation_id)
+# NOTE: ### SET the agent type to use ###
+agent_type = "informational"
+# NOTE: #################################
+
+if agent_type == "socratic":
+    invoke = invoke_socratic_agent
+elif agent_type == "informational":
+    invoke = invoke_informational_agent
+else:
+    # default to 'base'
+    invoke = invoke_base_agent
+
+response = invoke(query=message, \
+                  conversation_history=conversation_history, \
+                  summary=summary, \
+                  conversationalStyle=conversationalStyle, \
+                  question_response_details=question_response_details, \
+                  session_id=conversation_id)
+
 print("AI Response:", response)
diff --git a/src/agents/utils/types.py b/src/agents/utils/types.py
new file mode 100644
index 0000000..943e6a2
--- /dev/null
+++ b/src/agents/utils/types.py
@@ -0,0 +1,3 @@
+from typing import Any, Dict, TypeAlias
+
+InvokeAgentResponseType: TypeAlias = Dict[str, Any]
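`test_prompts.py` now selects the invoker with an if/elif chain, mirroring the dispatch that `module.py` performs in the next file. If more agent types are added, a lookup table would keep that choice in one place; a possible refactor is sketched below (the table itself is an assumption, not part of this patch):

```python
# Hypothetical alternative to the if/elif dispatch in test_prompts.py.
from src.agents.base_agent.base_agent import invoke_base_agent
from src.agents.informational_agent.informational_agent import invoke_informational_agent
from src.agents.socratic_agent.socratic_agent import invoke_socratic_agent

AGENT_INVOKERS = {
    "informational": invoke_informational_agent,
    "socratic": invoke_socratic_agent,
}

agent_type = "informational"
# Unknown agent types fall back to the base agent, as in the script above.
invoke = AGENT_INVOKERS.get(agent_type, invoke_base_agent)
```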
diff --git a/src/module.py b/src/module.py
index d1878a2..faf9d0b 100755
--- a/src/module.py
+++ b/src/module.py
@@ -1,24 +1,14 @@
-from typing import Any, Dict, TypeAlias
-from langchain_core.messages import SystemMessage, RemoveMessage, HumanMessage, AIMessage
+from typing import Any
 
 try:
-    from .agents.chatbot_summarised_memory_agent import ChatbotAgent
-    from .agents.profiling_agent import ProfilingAgent
-    from .agents.no_memory_agent import ChatbotNoMemoryAgent
-    from .agents.no_summary_no_memory_agent import ChatbotNoSummaryNoMemoryAgent
     from .module_response import Result, Params
+    from .agents.informational_agent.informational_agent import invoke_informational_agent
+    from .agents.socratic_agent.socratic_agent import invoke_socratic_agent
 except ImportError:
-    from src.agents.chatbot_summarised_memory_agent import ChatbotAgent
-    from src.agents.profiling_agent import ProfilingAgent
-    from src.agents.no_memory_agent import ChatbotNoMemoryAgent
-    from src.agents.no_summary_no_memory_agent import ChatbotNoSummaryNoMemoryAgent
     from src.module_response import Result, Params
+    from src.agents.informational_agent.informational_agent import invoke_informational_agent
+    from src.agents.socratic_agent.socratic_agent import invoke_socratic_agent
 
 import time
 
-chatbot_agent = ChatbotAgent(len_memory=4)
-profiling_agent = ProfilingAgent()
-no_memory_agent = ChatbotNoMemoryAgent()
-no_summary_no_memory_agent = ChatbotNoSummaryNoMemoryAgent()
-
 def chat_module(message: Any, params: Params) -> Result:
     """
     Function used by student to converse with a chatbot.
@@ -27,7 +17,7 @@ def chat_module(message: Any, params: Params) -> Result:
 
     - `message` which is the message sent by the student.
     - `params` which are any extra parameters that may be useful,
-        e.g., conversation history and summary, conversational style of user, conversation id.
+        e.g., conversation history and summary, conversational style of user, conversation id, agent type.
 
     The output of this function is what is returned as the API response
     and therefore must be JSON-encodable. It must also conform to the
@@ -48,6 +38,7 @@ def chat_module(message: Any, params: Params) -> Result:
     summary = ""
     conversationalStyle = ""
     question_response_details = ""
+    agent_type = "informational" # default
 
     if "include_test_data" in params:
         include_test_data = params["include_test_data"]
@@ -59,19 +50,28 @@
         conversationalStyle = params["conversational_style"]
     if "question_response_details" in params:
         question_response_details = params["question_response_details"]
+    if "agent_type" in params:
+        agent_type = params["agent_type"]
     if "conversation_id" in params:
         conversation_id = params["conversation_id"]
     else:
         raise Exception("Internal Error: The conversation id is required in the parameters of the chat module.")
 
     start_time = time.time()
+
+    if agent_type == "socratic":
+        invoke = invoke_socratic_agent
+    else:
+        # default to 'informational'
+        invoke = invoke_informational_agent
+
+    chatbot_response = invoke(query=message, \
+                              conversation_history=conversation_history, \
+                              summary=summary, \
+                              conversationalStyle=conversationalStyle, \
+                              question_response_details=question_response_details, \
+                              session_id=conversation_id)
 
-    chatbot_response = invoke_agent_no_memory(query=message, \
-                              conversation_history=conversation_history, \
-                              summary=summary, \
-                              conversationalStyle=conversationalStyle, \
-                              question_response_details=question_response_details, \
-                              session_id=conversation_id)
     end_time = time.time()
     result._processing_time = end_time - start_time
@@ -81,124 +81,4 @@
     result.add_metadata("conversation_history", chatbot_response["intermediate_steps"][2])
     result.add_processing_time(end_time - start_time)
 
-    return result.to_dict(include_test_data=include_test_data)
-
-
-# ######## INVOKE AGENTS ########
-# return type agents invoke
-InvokeAgentResponseType: TypeAlias = Dict[str, Any]
-
-def invoke_agent_no_summary_no_memory(query: str, conversation_history: list, session_id: str) -> InvokeAgentResponseType:
-    """
-    Call an agent that has no conversation memeory and expects to receive all past messages in the params and the latest human request in the query.
-    """
-    print(f'in invoke_agent_no_summary_no_memory(), query = {query}, thread_id = {session_id}')
-
-    config = {"configurable": {"thread_id": session_id}}
-    response_events = no_summary_no_memory_agent.app.invoke({"messages": conversation_history + [HumanMessage(content=query)]}, config=config, stream_mode="values")
-    pretty_printed_response = no_summary_no_memory_agent.pretty_response_value(response_events) # for last event in the response
-
-    return {
-        "input": query,
-        "output": pretty_printed_response,
-        "intermediate_steps": []
-    }
-
-def invoke_agent_no_memory(query: str, conversation_history: list, summary: str, conversationalStyle: str, question_response_details: str, session_id: str) -> InvokeAgentResponseType:
-    """
-    Call an agent that has no conversation memory and expects to receive all past messages in the params and the latest human request in the query.
-    If conversation history longer than X, the agent will summarize the conversation and will provide a conversational style analysis.
-    """
-    print(f'in invoke_agent_no_memory(), query = {query}, thread_id = {session_id}')
-
-    config = {"configurable": {"thread_id": session_id, "summary": summary, "conversational_style": conversationalStyle, "question_response_details": question_response_details}}
-    response_events = no_memory_agent.app.invoke({"messages": conversation_history + [HumanMessage(content=query)]}, config=config, stream_mode="values") #updates
-    pretty_printed_response = no_memory_agent.pretty_response_value(response_events) # get last event/ai answer in the response
-
-    # Gather Metadata from the agent
-    summary = no_memory_agent.get_summary()
-    conversationalStyle = no_memory_agent.get_conversational_style()
-
-    return {
-        "input": query,
-        "output": pretty_printed_response,
-        "intermediate_steps": [str(summary), conversationalStyle, conversation_history]
-    }
-
-def invoke_simple_agent_with_retry(query: str, session_id: str, prompt_prefix: str = "") -> InvokeAgentResponseType:
-    """
-    Retry the simple agent if a tool fails to run.
-    This can help when there are intermittent connection issues to external APIs.
- """ - print(f'in invoke_simple_agent_with_retry(), query = {query}, thread_id = {session_id}') - - config = {"configurable": {"thread_id": session_id, "prompt_prefix": prompt_prefix}} - response_events = chatbot_agent.app.invoke({"messages": [HumanMessage(content=query)]}, config=config, stream_mode="values") - pretty_printed_response = chatbot_agent.pretty_response_value(response_events) # get last event/ai answer in the response - - # Gather Metadata from the agent - summary = chatbot_agent.get_summary(config) - nr_messages = len(chatbot_agent.app.get_state(config).values["messages"]) - nr_valid_messages = len([m for m in chatbot_agent.app.get_state(config).values["messages"] if m.type != "remove"]) - if "system" in chatbot_agent.app.get_state(config).values["messages"][-1].type: - nr_valid_messages -= 1 - nr_human_messages = len([m for m in chatbot_agent.app.get_state(config).values["messages"] if m.type == "human"]) - # NOTE: intermediate_steps is expected to be a list - intermediate_steps = ["Number of messages sent: "+ str(nr_human_messages), "Number of remembered messages:"+str(nr_valid_messages), "Number of total messages in the conversation: "+ str(nr_messages)] - if summary: - intermediate_steps.append("Summary: "+ str(summary)) - - return { - "input": query, - "output": pretty_printed_response, - "intermediate_steps": intermediate_steps - } - -def invoke_profiling_agent_with_retry(session_id: str) -> InvokeAgentResponseType: - """ - Retry the profiling agent if a tool fails to run. - This can help when there are intermittent connection issues to external APIs. - """ - print(f'in invoke_profiling_agent_with_retry(), session_id = {session_id}') - - config = {"configurable": {"thread_id": session_id}} - response_events = profiling_agent.app.invoke({"messages": []}, config=config, stream_mode="values") - pretty_printed_response = profiling_agent.pretty_response_value(response_events) # get last event/ai answer in the response - - return { - "input": "History of the conversation", - "output": pretty_printed_response, - "intermediate_steps": [] - } - -# if __name__ == "__main__": -# conversation_history = [ -# {"content": "Hi, in one word tell me about London.", "type": "human"}, -# {"content": "diverse", "type": "ai"}, -# {"content": "What about dogs?", "type": "human"}, -# {"content": "loyal", "type": "ai"}, -# {"content": "cats", "type": "human"}, -# {"content": "curious", "type": "ai"}, -# {"content": "Paris?", "type": "human"}, -# {"content": "romantic", "type": "ai"}, -# {"content": "What about the weather?", "type": "human"}, -# {"content": "unpredictable", "type": "ai"}, -# {"content": "food?", "type": "human"}, -# {"content": "delicious", "type": "ai"}, -# ] -# responses = ["what about birds?", "Berlin?"] - -# for message in responses: -# try: -# llm_response = chat_module(message, {"include_test_data": True, "conversation_history": conversation_history, "conversation_id": "test1234"}) - -# print(llm_response) -# print("AI: "+llm_response["chatbot_response"]) -# print("Summary: ") -# print(llm_response["metadata"]["summary"]) -# print("Conversational Style: ") -# print(llm_response["metadata"]["conversational_style"]) -# print("Processing time: " + str(llm_response["processing_time"])) -# print("--------------------") -# except Exception as e: -# print("An error occurred within the chat_module(): " + str(e)) \ No newline at end of file + return result.to_dict(include_test_data=include_test_data) \ No newline at end of file diff --git a/src/module_test.py 
diff --git a/src/module_test.py b/src/module_test.py
index 9e81833..f1441cf 100755
--- a/src/module_test.py
+++ b/src/module_test.py
@@ -26,7 +26,7 @@ class TestChatModuleFunction(unittest.TestCase):
     # TODO: update the test cases
 
     def test_missing_parameters(self):
-        # Checking state for missing parameters
+        # Checking state for missing parameters on default agent
        response, params = "Hello, World", Params()
         expected_params = Params(include_test_data=True, conversation_history=[], \
                                  summary="", conversational_style="", \
@@ -59,13 +59,15 @@ def test_missing_parameters(self):
         self.assertTrue("Internal Error" in str(cm.exception))
         self.assertTrue("conversation id" in str(cm.exception))
 
-    def test_agent_output(self):
+    def test_all_agents_output(self):
         # Checking the output of the agents
-        response, params = "Hello, World", Params(conversation_id="1234Test")
+        agents = ["informational", "socratic"]
+        for agent in agents:
+            response, params = "Hello, World", Params(conversation_id="1234Test", agent_type=agent)
 
-        result = chat_module(response, params)
+            result = chat_module(response, params)
 
-        self.assertIsNotNone(result.get("chatbot_response"))
+            self.assertIsNotNone(result.get("chatbot_response"))
 
     def test_processing_time_calc(self):
         # Checking the processing time calculation
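The updated suite runs under the standard unittest runner. A minimal programmatic invocation, assuming it is launched from the repository root so that `src/` is discoverable:

```python
# Discover and run the chat-module tests from the repository root.
import unittest

suite = unittest.defaultTestLoader.discover("src", pattern="module_test.py")
unittest.TextTestRunner(verbosity=2).run(suite)
```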