diff --git a/README.md b/README.md index 8a0d2f22..96a05b75 100644 --- a/README.md +++ b/README.md @@ -164,7 +164,7 @@ print(rag.invoke({ "question": "What are the documents applying for housing loan ### Making the RAG into tool -Making agentic RAG is easy in flo +Making agentic RAG tool is easy in flo ```python rag_tool = rag_builder diff --git a/examples/agent_of_flo_ai.ipynb b/examples/agent_of_flo_ai.ipynb new file mode 100644 index 00000000..26209ba9 --- /dev/null +++ b/examples/agent_of_flo_ai.ipynb @@ -0,0 +1,233 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# The Agents of FloAI\n", + "\n", + "This notebook shows different kinds of agents that can be built using flo-ai. These agents have their own specialities, and this notebook tries to explain them.\n", + "\n", + "Here are the major types of agents:\n", + "\n", + "1. Agentic Agents (`kind: agentic`): Any agent created in flo-ai by default tries to be an agentic agent. This agent always needs a tool, meaning an agent without a tool, if specified as an agentic agent, will throw an exception\n", + "\n", + "2. LLM Agents (`kind: llm`): These agents are simply an LLM which can answer any questions asked to it. These agents don't accept tools. If tools are passed to an agent of type llm, they are ignored.\n", + "\n", + "3. Tool LLM (`kind: tool`): These agents are just tools or functions that can be executed on the current state. 
Within the tool will be given the current state as the input, meaning the history of what happened in the network right now" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "True" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from flo_ai import Flo\n", "from flo_ai import FloSession\n", "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "from dotenv import load_dotenv\n", "load_dotenv()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Setup\n", "Create the Flo session, setup tools" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from langchain_community.tools.tavily_search.tool import TavilySearchResults\n", "\n", "llm = ChatOpenAI(temperature=0, model_name='gpt-4o-mini')\n", "session = FloSession(llm)\n", "\n", "session.register_tool(\n", " name=\"InternetSearchTool\", \n", " tool=TavilySearchResults()\n", ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Agentic Agent (agentic)\n", "\n", "Here we are gonna create a simple weather assistant flo agent that can check the weather by an internet searching tool.\n", "\n", "As you can see the kind is `agentic`" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "simple_weather_checking_agent = \"\"\"\n", "apiVersion: flo/alpha-v1\n", "kind: FloAgent\n", "name: weather-assistant\n", "agent:\n", " name: Weather Assistant\n", " kind: agentic\n", " job: >\n", " Given the city name you are capable of answering the latest whether this time of the year by searching the internet\n", " tools:\n", " - 
name: InternetSearchTool\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m\n", + "Invoking: `tavily_search_results_json` with `{'query': 'California weather October 2023'}`\n", + "\n", + "\n", + "\u001b[0m\u001b[36;1m\u001b[1;3m[{'url': 'https://www.weatherapi.com/', 'content': \"{'location': {'name': 'California City', 'region': 'California', 'country': 'United States of America', 'lat': 35.13, 'lon': -117.99, 'tz_id': 'America/Los_Angeles', 'localtime_epoch': 1725356198, 'localtime': '2024-09-03 02:36'}, 'current': {'last_updated_epoch': 1725355800, 'last_updated': '2024-09-03 02:30', 'temp_c': 25.3, 'temp_f': 77.5, 'is_day': 0, 'condition': {'text': 'Clear', 'icon': '//cdn.weatherapi.com/weather/64x64/night/113.png', 'code': 1000}, 'wind_mph': 3.8, 'wind_kph': 6.1, 'wind_degree': 230, 'wind_dir': 'SW', 'pressure_mb': 1016.0, 'pressure_in': 29.99, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 28, 'cloud': 0, 'feelslike_c': 24.9, 'feelslike_f': 76.9, 'windchill_c': 24.1, 'windchill_f': 75.4, 'heatindex_c': 24.3, 'heatindex_f': 75.7, 'dewpoint_c': 5.8, 'dewpoint_f': 42.4, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 1.0, 'gust_mph': 9.9, 'gust_kph': 15.9}}\"}, {'url': 'https://www.timeanddate.com/weather/usa/los-angeles/historic?month=10&year=2023', 'content': 'Weather reports from October 2023 in Los Angeles, California, USA with highs and lows. Sign in. News. News Home; Astronomy News; Time Zone News; Calendar & Holiday News; Newsletter; Live events. ... 
High & Low Weather Summary for October 2023 Temperature Humidity Pressure; High: 92 °F (Oct 5, 11:53 am) 100% (Oct 7, 9:02 am) 30.11 \"Hg (Oct 7, 9 ...'}, {'url': 'https://world-weather.info/forecast/usa/california/october-2023/', 'content': 'Detailed ⚡ California Weather Forecast for October 2023 - day/night 🌡️ temperatures, precipitations - World-Weather.info'}, {'url': 'https://www.meteoprog.com/weather/California-california/month/october/', 'content': 'California (United States) weather in October 2023 ☀️ Accurate weather forecast for California in October ⛅ Detailed forecast By month Current temperature \"near me\" Weather news ⊳ Widget of weather ⊳ Water temperature | METEOPROG'}, {'url': 'https://www.latimes.com/environment/story/2023-10-18/california-heat-wave-2023-when-will-it-end', 'content': 'October heat wave could break records in California, but chance of rain awaits next week. Oct. 19, 2023. The unseasonably hot weather brings increased concern for heat-related illness, especially ...'}]\u001b[0m\u001b[32;1m\u001b[1;3mThe weather in California during October 2023 has been characterized by unseasonably high temperatures. For example, in Los Angeles, temperatures reached a high of 92°F (approximately 33°C) on October 5. The weather has been mostly clear, with low humidity levels and little to no precipitation reported.\n", + "\n", + "As of now, the current temperature in California City is about 25.3°C (77.5°F) with clear skies and a light breeze. 
The humidity is relatively low at 28%, and there is no precipitation expected.\n", + "\n", + "For more detailed forecasts and updates, you can check the following sources:\n", + "- [Weather API](https://www.weatherapi.com/)\n", + "- [Time and Date](https://www.timeanddate.com/weather/usa/los-angeles/historic?month=10&year=2023)\n", + "- [World Weather Info](https://world-weather.info/forecast/usa/california/october-2023/)\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n", + "{'messages': [HumanMessage(content='Whats the whether in california')], 'output': 'The weather in California during October 2023 has been characterized by unseasonably high temperatures. For example, in Los Angeles, temperatures reached a high of 92°F (approximately 33°C) on October 5. The weather has been mostly clear, with low humidity levels and little to no precipitation reported.\\n\\nAs of now, the current temperature in California City is about 25.3°C (77.5°F) with clear skies and a light breeze. 
The humidity is relatively low at 28%, and there is no precipitation expected.\\n\\nFor more detailed forecasts and updates, you can check the following sources:\\n- [Weather API](https://www.weatherapi.com/)\\n- [Time and Date](https://www.timeanddate.com/weather/usa/los-angeles/historic?month=10&year=2023)\\n- [World Weather Info](https://world-weather.info/forecast/usa/california/october-2023/)'}\n" ] } ], "source": [ "flo = Flo.build(session, simple_weather_checking_agent)\n", "\n", "print(flo.invoke(\"Whats the whether in california\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## LLM Agent (llm)\n", "\n", "Here we are gonna create a simple LLM math assistant flo agent that can answer any math question\n", "\n", "As you can see the kind is `llm`" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "simple_ll_agent = \"\"\"\n", "apiVersion: flo/alpha-v1\n", "kind: FloAgent\n", "name: llm-assistant\n", "agent:\n", " name: Ask llm anything\n", " kind: llm\n", " job: >\n", " You are a high school maths teacher. Answer any questions the students ask \n", "\"\"\"" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Pythagoras' Theorem is a fundamental principle in geometry that relates to right-angled triangles. It states that in a right triangle, the square of the length of the hypotenuse (the side opposite the right angle) is equal to the sum of the squares of the lengths of the other two sides. 
\n", + "\n", + "The theorem can be expressed with the formula:\n", + "\n", + "\\[ c^2 = a^2 + b^2 \\]\n", + "\n", + "where:\n", + "- \\( c \\) is the length of the hypotenuse,\n", + "- \\( a \\) and \\( b \\) are the lengths of the other two sides.\n", + "\n", + "This theorem is useful for calculating the length of one side of a right triangle if the lengths of the other two sides are known. Would you like to see an example of how to use it?\n" + ] + } + ], + "source": [ + "flo = Flo.build(session, simple_ll_agent)\n", + "\n", + "print(flo.invoke(\"What is pythagorus theorum\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/email_reply_agent.ipynb b/examples/email_reply_agent.ipynb index d8501a41..17d31b1e 100644 --- a/examples/email_reply_agent.ipynb +++ b/examples/email_reply_agent.ipynb @@ -162,7 +162,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 5, @@ -361,7 +361,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{'SupportSupervisor-6y1Aw': {'next': 'TransactionFetcher-Rchvv'}}\n", + "{'SupportSupervisor-pwZRF': {'next': 'TransactionFetcher-lpv5Y'}}\n", "----\n", "\n", "\n", @@ -376,7 +376,7 @@ "\n", "Thank you for reaching out regarding the failed transaction.\n", "\n", - "The transaction with ID 12123123432 occurred on 23/07/2024 IST and failed due to insufficient balance in the account.\n", + "The transaction with ID 12123123432 occurred on 23/07/2024 IST and unfortunately failed due to insufficient balance in the account.\n", "\n", "If you have any further 
questions or need assistance with anything else, feel free to ask!\n", "\n", @@ -384,15 +384,15 @@ "Tom\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n", - "{'TransactionFetcher-Rchvv': {'messages': [HumanMessage(content='Subject: Re: Failed Transaction Inquiry\\n\\nHi Vishnu,\\n\\nThank you for reaching out regarding the failed transaction.\\n\\nThe transaction with ID 12123123432 occurred on 23/07/2024 IST and failed due to insufficient balance in the account.\\n\\nIf you have any further questions or need assistance with anything else, feel free to ask!\\n\\nBest regards, \\nTom', name='TransactionFetcher-Rchvv')]}}\n", + "{'TransactionFetcher-lpv5Y': {'messages': [HumanMessage(content='Subject: Re: Failed Transaction Inquiry\\n\\nHi Vishnu,\\n\\nThank you for reaching out regarding the failed transaction.\\n\\nThe transaction with ID 12123123432 occurred on 23/07/2024 IST and unfortunately failed due to insufficient balance in the account.\\n\\nIf you have any further questions or need assistance with anything else, feel free to ask!\\n\\nBest regards, \\nTom', name='TransactionFetcher-lpv5Y')]}}\n", "----\n", - "{'SupportSupervisor-6y1Aw': {'next': 'EmailSender-N1MZP'}}\n", + "{'SupportSupervisor-pwZRF': {'next': 'EmailSender-Bn2zS'}}\n", "----\n", "\n", "\n", "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", "\u001b[32;1m\u001b[1;3m\n", - "Invoking: `email_triage` with `{'email': 'Subject: Re: Failed Transaction Inquiry\\n\\nHi Vishnu,\\n\\nThank you for reaching out regarding the failed transaction.\\n\\nThe transaction with ID 12123123432 occurred on 23/07/2024 IST and failed due to insufficient balance in the account.\\n\\nIf you have any further questions or need assistance with anything else, feel free to ask!\\n\\nBest regards,\\nTom'}`\n", + "Invoking: `email_triage` with `{'email': 'Subject: Re: Failed Transaction Inquiry\\n\\nHi Vishnu,\\n\\nThank you for reaching out regarding the failed transaction.\\n\\nThe transaction with ID 
12123123432 occurred on 23/07/2024 IST and unfortunately failed due to insufficient balance in the account.\\n\\nIf you have any further questions or need assistance with anything else, feel free to ask!\\n\\nBest regards,\\nTom'}`\n", "\n", "\n", "\u001b[0mSubject: Re: Failed Transaction Inquiry\n", @@ -401,7 +401,7 @@ "\n", "Thank you for reaching out regarding the failed transaction.\n", "\n", - "The transaction with ID 12123123432 occurred on 23/07/2024 IST and failed due to insufficient balance in the account.\n", + "The transaction with ID 12123123432 occurred on 23/07/2024 IST and unfortunately failed due to insufficient balance in the account.\n", "\n", "If you have any further questions or need assistance with anything else, feel free to ask!\n", "\n", @@ -410,9 +410,9 @@ "\u001b[36;1m\u001b[1;3mEmail sent successfully\u001b[0m\u001b[32;1m\u001b[1;3mI have sent the response to Vishnu regarding the failed transaction. If you need any further assistance, feel free to ask!\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n", - "{'EmailSender-N1MZP': {'messages': [HumanMessage(content='I have sent the response to Vishnu regarding the failed transaction. If you need any further assistance, feel free to ask!', name='EmailSender-N1MZP')]}}\n", + "{'EmailSender-Bn2zS': {'messages': [HumanMessage(content='I have sent the response to Vishnu regarding the failed transaction. 
If you need any further assistance, feel free to ask!', name='EmailSender-Bn2zS')]}}\n", "----\n", - "{'SupportSupervisor-6y1Aw': {'next': 'FINISH'}}\n", + "{'SupportSupervisor-pwZRF': {'next': 'FINISH'}}\n", "----\n" ] } diff --git a/examples/history_aware_rag.py b/examples/history_aware_rag.py index da2199ee..d7b882eb 100644 --- a/examples/history_aware_rag.py +++ b/examples/history_aware_rag.py @@ -8,7 +8,7 @@ load_dotenv() from flo_ai import FloSession -from flo_ai.retrievers.flo_retriever import FloRagBuilder +from flo_ai.tools.flo_rag_retriver_tool import FloRagRetriverTool db_url = os.getenv("MONGO_DB_URL") @@ -28,13 +28,16 @@ llm = ChatOpenAI(temperature=0, model_name='gpt-4o') session = FloSession(llm) -rag_builder = FloRagBuilder(session, store.as_retriever()) import logging logging.basicConfig() logging.getLogger("langchain.retrievers.multi_query").setLevel(logging.INFO) -rag_tool = rag_builder.with_multi_query().build_rag_tool(name="RAGTool", description="RAG to answer question by looking at db") -print(rag_tool.invoke({"query": "What is the interest rate on housing loans"})) +builder = FloRagRetriverTool.Builder(session, store.as_retriever()) +builder.enable_multi_query() +tool = builder.build(name="RAGTool", description="RAG to answer question by looking at db") + +# rag_tool = rag_builder.with_multi_query().build_rag_tool(name="RAGTool", description="RAG to answer question by looking at db") +print(tool.invoke({"query": "What is the interest rate on housing loans"})) diff --git a/flo_ai/__init__.py b/flo_ai/__init__.py index af3cb80b..3532140d 100644 --- a/flo_ai/__init__.py +++ b/flo_ai/__init__.py @@ -1,5 +1,5 @@ from flo_ai.core import Flo -from flo_ai.models.flo_agent import FloAgentBuilder +from flo_ai.models.flo_agent import FloAgent from flo_ai.router.flo_supervisor import FloSupervisorBuilder from flo_ai.models.flo_team import FloTeamBuilder from flo_ai.state.flo_session import FloSession diff --git a/flo_ai/builders/yaml_builder.py 
b/flo_ai/builders/yaml_builder.py index 3c74f875..e8ae0345 100644 --- a/flo_ai/builders/yaml_builder.py +++ b/flo_ai/builders/yaml_builder.py @@ -1,11 +1,12 @@ from flo_ai.models.flo_team import FloTeamBuilder -from flo_ai.models.flo_agent import FloAgentBuilder, FloAgent +from flo_ai.models.flo_agent import FloAgent from flo_ai.yaml.flo_team_builder import (FloRoutedTeamConfig, TeamConfig, AgentConfig, FloAgentConfig) from flo_ai.models.flo_executable import ExecutableFlo from flo_ai.models.flo_planner import FloPlannerBuilder from flo_ai.state.flo_session import FloSession from flo_ai.router.flo_router_factory import FloRouterFactory +from flo_ai.factory.agent_factory import AgentFactory from typing import Union def build_supervised_team( @@ -17,49 +18,39 @@ def build_supervised_team( return team elif isinstance(flo_config, FloAgentConfig): agent_config: AgentConfig = flo_config.agent - agent = create_agent(session, agent_config, session.tools) + agent = AgentFactory.create(session, agent_config, session.tools) return agent def parse_and_build_subteams( session: FloSession, - team: TeamConfig, + team_config: TeamConfig, tool_map) -> ExecutableFlo: flo_team = None - if team.agents: + if team_config.agents: agents = [] - for agent in team.agents: - flo_agent: FloAgent = create_agent(session, agent, tool_map) + for agent in team_config.agents: + flo_agent: FloAgent = AgentFactory.create(session, agent, tool_map) agents.append(flo_agent) flo_team = FloTeamBuilder( session=session, - name=team.name, + name=team_config.name, members=agents ).build() - router = FloRouterFactory.create(session, team, flo_team) + router = FloRouterFactory.create(session, team_config, flo_team) flo_routed_team = router.build_routed_team() - if team.planner is not None: - return FloPlannerBuilder(session, team.planner.name, flo_routed_team).build() + if team_config.planner is not None: + return FloPlannerBuilder(session, team_config.planner.name, flo_routed_team).build() else: flo_teams = 
[] - for subteam in team.subteams: + for subteam in team_config.subteams: flo_subteam = parse_and_build_subteams(session, subteam, tool_map) flo_teams.append(flo_subteam) flo_team = FloTeamBuilder( session=session, - name=team.name, + name=team_config.name, members=flo_teams ).build() - router = FloRouterFactory.create(session, team, flo_team) + router = FloRouterFactory.create(session, team_config, flo_team) flo_routed_team = router.build_routed_team() return flo_routed_team - -def create_agent(session: FloSession, agent: AgentConfig, tool_map) -> FloAgent: - tools = [tool_map[tool.name] for tool in agent.tools] - flo_agent: FloAgent = FloAgentBuilder( - session, - agent.name, - agent.job, - tools - ).build() - return flo_agent diff --git a/flo_ai/factory/agent_factory.py b/flo_ai/factory/agent_factory.py new file mode 100644 index 00000000..bcb58b02 --- /dev/null +++ b/flo_ai/factory/agent_factory.py @@ -0,0 +1,45 @@ +from flo_ai.state.flo_session import FloSession +from flo_ai.yaml.flo_team_builder import (AgentConfig) +from flo_ai.models.flo_agent import FloAgent +from flo_ai.models.flo_llm_agent import FloLLMAgent +from enum import Enum + +class AgentKinds(Enum): + executable = "executable" + + agentic = "agentic" + llm = "llm" + + +class AgentFactory(): + + @staticmethod + def create(session: FloSession, agent: AgentConfig, tool_map): + kind = agent.kind + if kind is not None: + agent_kind = getattr(AgentKinds, kind, None) + if agent_kind is None: + raise ValueError(f"Agent kind cannot be {kind} !") + match(agent_kind): + case AgentKinds.llm: + return AgentFactory.__create_llm_agent(session, agent) + case AgentKinds.executable: + raise ValueError("un-supported") + return AgentFactory.__create_agentic_agent(session, agent, tool_map) + + @staticmethod + def __create_agentic_agent(session: FloSession, agent: AgentConfig, tool_map) -> FloAgent: + tools = [tool_map[tool.name] for tool in agent.tools] + flo_agent: FloAgent = FloAgent.Builder( + session, + 
agent.name, + agent.job, + tools + ).build() + return flo_agent + + @staticmethod + def __create_llm_agent(session: FloSession, agent: AgentConfig) -> FloLLMAgent: + builder = FloLLMAgent.Builder(session, agent.name, agent.job, agent.role) + llm_agent: FloLLMAgent = builder.build() + return llm_agent \ No newline at end of file diff --git a/flo_ai/models/flo_agent.py b/flo_ai/models/flo_agent.py index 25d3dd2a..260cbb9a 100644 --- a/flo_ai/models/flo_agent.py +++ b/flo_ai/models/flo_agent.py @@ -18,37 +18,37 @@ def __init__(self, self.agent: Runnable = agent, self.executor: AgentExecutor = executor -class FloAgentBuilder: - def __init__(self, - session: FloSession, - name: str, - prompt: Union[ChatPromptTemplate, str], - tools: list[BaseTool], - verbose: bool = True, - role: Optional[str] = None, - llm: Union[BaseLanguageModel, None] = None, - return_intermediate_steps: bool = False, - handle_parsing_errors: bool = True) -> None: - self.name: str = randomize_name(name) - self.llm = llm if llm is not None else session.llm - # TODO improve to add more context of what other agents are available - system_prompts = [("system", "You are a {}".format(role)), ("system", prompt)] if role is not None else [("system", prompt)] - system_prompts.append(MessagesPlaceholder(variable_name="messages")) - system_prompts.append(MessagesPlaceholder(variable_name="agent_scratchpad")) - self.prompt: ChatPromptTemplate = ChatPromptTemplate.from_messages( - system_prompts - ) if isinstance(prompt, str) else prompt - self.tools: list[BaseTool] = tools - self.verbose = verbose - self.return_intermediate_steps = return_intermediate_steps - self.handle_parsing_errors = handle_parsing_errors + class Builder: + def __init__(self, + session: FloSession, + name: str, + prompt: Union[ChatPromptTemplate, str], + tools: list[BaseTool], + verbose: bool = True, + role: Optional[str] = None, + llm: Union[BaseLanguageModel, None] = None, + return_intermediate_steps: bool = False, + 
handle_parsing_errors: bool = True) -> None: + self.name: str = randomize_name(name) + self.llm = llm if llm is not None else session.llm + # TODO improve to add more context of what other agents are available + system_prompts = [("system", "You are a {}".format(role)), ("system", prompt)] if role is not None else [("system", prompt)] + system_prompts.append(MessagesPlaceholder(variable_name="messages")) + system_prompts.append(MessagesPlaceholder(variable_name="agent_scratchpad")) + self.prompt: ChatPromptTemplate = ChatPromptTemplate.from_messages( + system_prompts + ) if isinstance(prompt, str) else prompt + self.tools: list[BaseTool] = tools + self.verbose = verbose + self.return_intermediate_steps = return_intermediate_steps + self.handle_parsing_errors = handle_parsing_errors - def build(self) -> AgentExecutor: - agent = create_tool_calling_agent(self.llm, self.tools, self.prompt) - executor = AgentExecutor(agent=agent, - tools=self.tools, - verbose=self.verbose, - return_intermediate_steps=self.return_intermediate_steps, - handle_parsing_errors=self.handle_parsing_errors) - return FloAgent(agent, executor, self.name) + def build(self) -> AgentExecutor: + agent = create_tool_calling_agent(self.llm, self.tools, self.prompt) + executor = AgentExecutor(agent=agent, + tools=self.tools, + verbose=self.verbose, + return_intermediate_steps=self.return_intermediate_steps, + handle_parsing_errors=self.handle_parsing_errors) + return FloAgent(agent, executor, self.name) diff --git a/flo_ai/models/flo_executable.py b/flo_ai/models/flo_executable.py index f60a5468..32fb101a 100644 --- a/flo_ai/models/flo_executable.py +++ b/flo_ai/models/flo_executable.py @@ -1,21 +1,24 @@ from flo_ai.models.flo_member import FloMember -from langgraph.graph.graph import CompiledGraph +from langchain_core.runnables import Runnable from langchain_core.messages import HumanMessage class ExecutableFlo(FloMember): - def __init__(self, name: str, graph: CompiledGraph, type: str = "team") -> 
None: + def __init__(self, + name: str, + runnable: Runnable, + type: str = "team") -> None: super().__init__(name, type) - self.graph = graph + self.runnable = runnable def stream(self, work, config = None): - return self.graph.stream({ + return self.runnable.stream({ "messages": [ HumanMessage(content=work) ] }, config) def invoke(self, work, config = None): - return self.graph.invoke({ + return self.runnable.invoke({ "messages": [ HumanMessage(content=work) ] diff --git a/flo_ai/models/flo_llm_agent.py b/flo_ai/models/flo_llm_agent.py new file mode 100644 index 00000000..8886866a --- /dev/null +++ b/flo_ai/models/flo_llm_agent.py @@ -0,0 +1,39 @@ +from langchain_core.tools import BaseTool +from langchain_core.runnables import Runnable +from langchain.agents import create_tool_calling_agent +from langchain_core.runnables import Runnable +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from flo_ai.helpers.utils import randomize_name +from flo_ai.models.flo_executable import ExecutableFlo +from flo_ai.state.flo_session import FloSession +from typing import Union, Optional +from langchain_core.output_parsers import StrOutputParser + +class FloLLMAgent(ExecutableFlo): + def __init__(self, + executor: Runnable, + name: str) -> None: + super().__init__(name, executor, "agent") + self.executor: Runnable = executor + + class Builder: + def __init__(self, + session: FloSession, + name: str, + prompt: Union[ChatPromptTemplate, str], + role: Optional[str] = None, + llm: Union[BaseLanguageModel, None] = None) -> None: + self.name: str = randomize_name(name) + self.llm = llm if llm is not None else session.llm + # TODO improve to add more context of what other agents are available + system_prompts = [("system", "You are a {}".format(role)), ("system", prompt)] if role is not None else [("system", prompt)] + system_prompts.append(MessagesPlaceholder(variable_name="messages")) + 
self.prompt: ChatPromptTemplate = ChatPromptTemplate.from_messages( + system_prompts + ) if isinstance(prompt, str) else prompt + + + def build(self) -> Runnable: + executor = self.prompt | self.llm | StrOutputParser() + return FloLLMAgent(executor, self.name) diff --git a/flo_ai/models/flo_node.py b/flo_ai/models/flo_node.py new file mode 100644 index 00000000..acbdb6f0 --- /dev/null +++ b/flo_ai/models/flo_node.py @@ -0,0 +1,58 @@ +import functools +from flo_ai.models.flo_agent import FloAgent +from flo_ai.models.flo_routed_team import FloRoutedTeam +from langchain.agents import AgentExecutor +from flo_ai.state.flo_state import TeamFloAgentState +from langchain_core.messages import HumanMessage +from langchain_core.runnables import Runnable + +class FloNode(): + + def __init__(self, + func: functools.partial, + name: str) -> None: + self.name = name + self.func = func + + class Builder(): + + @staticmethod + def teamflo_agent_node(state: TeamFloAgentState, agent: AgentExecutor, name: str): + result = agent.invoke(state) + return {"messages": [HumanMessage(content=result["output"], name=name)]} + + @staticmethod + def teamflo_tool_node(state: TeamFloAgentState, tool: Runnable, name): + # TODO see if you want to send the entire data or not + result = tool.invoke(state["messages"][-1]) + return {"messages": [HumanMessage(content=result, name=name)]} + + @staticmethod + def get_last_message(state: TeamFloAgentState) -> str: + return state["messages"][-1].content + + @staticmethod + def join_graph(response: dict): + return { "messages": [ response["messages"][-1] ] } + + @staticmethod + def teamflo_team_node( message: str, members: list[str]): + results = { + "messages": [HumanMessage(content=message)], + "team_members": ", ".join(members), + } + return results + + def build_from_agent(self, flo_agent: FloAgent): + agent_func = functools.partial(FloNode.Builder.teamflo_agent_node, agent=flo_agent.executor, name=flo_agent.name) + return FloNode(agent_func, 
flo_agent.name) + + def build_from_team(self, flo_team: FloRoutedTeam): + return FloNode(( + FloNode.Builder.get_last_message | functools.partial(FloNode.Builder.teamflo_team_node, members=flo_team.graph.nodes) + | flo_team.graph | FloNode.Builder.join_graph + ), flo_team.name) + + # TODO add type to tool + def build_from_tool(self, tool: Runnable, name: str): + return FloNode(functools.partial(FloNode.Builder.teamflo_tool_node, tool=tool, name=name)) \ No newline at end of file diff --git a/flo_ai/models/flo_rag.py b/flo_ai/models/flo_rag.py index 8bae8394..7c73b261 100644 --- a/flo_ai/models/flo_rag.py +++ b/flo_ai/models/flo_rag.py @@ -48,13 +48,9 @@ def generate(self, state: TeamFloAgentState): question = messages[0].content docs = last_message.content - # Chain rag_chain = self.prompt | self.llm - - # Run response = rag_chain.invoke({"context": docs, "question": question}) - - return {"messages": [response] } + return { "messages": [response] } def build(self) -> FloRag: retrieve = ToolNode(self.tools) diff --git a/flo_ai/retrievers/flo_retriever.py b/flo_ai/retrievers/flo_retriever.py index 7a95c315..dbd2d3b0 100644 --- a/flo_ai/retrievers/flo_retriever.py +++ b/flo_ai/retrievers/flo_retriever.py @@ -9,11 +9,17 @@ from langchain.retrievers.document_compressors import DocumentCompressorPipeline from flo_ai.retrievers.flo_compression_pipeline import FloCompressionPipeline from langchain.tools.retriever import create_retriever_tool -from flo_ai.tools.flo_rag_tool import create_flo_rag_tool +from functools import partial +from langchain.pydantic_v1 import BaseModel, Field from langchain_core.tools import Tool +class FloRagToolInput(BaseModel): + query: str = Field(description="query to look up in retriever") + class FloRagBuilder(): - def __init__(self, session: FloSession, retriever: VectorStoreRetriever) -> None: + def __init__(self, + session: FloSession, + retriever: VectorStoreRetriever) -> None: self.session = session self.retriever = retriever 
self.default_prompt = ChatPromptTemplate.from_messages( @@ -109,6 +115,40 @@ def build_rag(self): def build_retriever_tool(self, name, description): return create_retriever_tool(self.retriever, name, description) + @staticmethod + def __get_rag_answer(query: str, runnable: Runnable): + result = runnable.invoke({ "question": query }) + return result["answer"].content + + @staticmethod + async def __aget_rag_answer(query: str, runnable: Runnable): + result = await runnable.ainvoke({ "question": query }) + return result["answer"].content + + @staticmethod + def __create_flo_rag_tool( + runnable_rag: Runnable, + name: str, + description: str + ) -> Tool: + func = partial( + FloRagBuilder.__get_rag_answer, + runnable=runnable_rag + ) + + afunc = partial( + FloRagBuilder.__aget_rag_answer, + runnable=runnable_rag + ) + + return Tool( + name=name, + description=description, + func=func, + coroutine=afunc, + args_schema=FloRagToolInput, + ) + def build_rag_tool(self, name, description) -> Tool: rag = self.__build_history_aware_rag() - return create_flo_rag_tool(rag, name, description) + return FloRagBuilder.__create_flo_rag_tool(rag, name, description) \ No newline at end of file diff --git a/flo_ai/router/flo_linear.py b/flo_ai/router/flo_linear.py index 13937197..86ed5e16 100644 --- a/flo_ai/router/flo_linear.py +++ b/flo_ai/router/flo_linear.py @@ -19,8 +19,7 @@ def build_agent_graph(self): for flo_agent_node in flo_agent_nodes: agent_name = agent_name_from_randomized_name(flo_agent_node.name) - workflow.add_node(agent_name, flo_agent_node.agent_node) - + workflow.add_node(agent_name, flo_agent_node.func) if self.config.edges is None: start_node_name = agent_name_from_randomized_name(flo_agent_nodes[0].name) end_node_name = agent_name_from_randomized_name(flo_agent_nodes[-1].name) @@ -42,13 +41,13 @@ def build_agent_graph(self): return FloRoutedTeam(self.flo_team.name, workflow_graph) def build_team_graph(self): - flo_team_entry_chains = 
[self.build_chain_for_teams(flo_agent) for flo_agent in self.members] + flo_team_entry_chains = [self.build_node_for_teams(flo_agent) for flo_agent in self.members] # Define the graph. super_graph = StateGraph(TeamFloAgentState) # First add the nodes, which will do the work for flo_team_chain in flo_team_entry_chains: agent_name = agent_name_from_randomized_name(flo_team_chain.name) - super_graph.add_node(agent_name, self.get_last_message | flo_team_chain.chain | self.join_graph) + super_graph.add_node(agent_name, flo_team_chain.func) if self.config.edges is None: start_node_name = agent_name_from_randomized_name(flo_team_entry_chains[0].name) diff --git a/flo_ai/router/flo_router.py b/flo_ai/router/flo_router.py index 1583f2f3..71333c2a 100644 --- a/flo_ai/router/flo_router.py +++ b/flo_ai/router/flo_router.py @@ -2,42 +2,15 @@ import functools from abc import ABC, abstractmethod from flo_ai.state.flo_session import FloSession -from flo_ai.models.flo_member import FloMember from flo_ai.models.flo_team import FloTeam from flo_ai.yaml.flo_team_builder import RouterConfig from flo_ai.models.flo_routed_team import FloRoutedTeam from flo_ai.models.flo_agent import FloAgent from flo_ai.state.flo_state import TeamFloAgentState -from langchain.agents import AgentExecutor -from langchain_core.messages import HumanMessage +from flo_ai.models.flo_node import FloNode from flo_ai.constants.prompt_constants import FLO_FINISH from langgraph.graph import END -def teamflo_agent_node(state: TeamFloAgentState, agent: AgentExecutor, name: str): - result = agent.invoke(state) - return {"messages": [HumanMessage(content=result["output"], name=name)]} - -class FloAgentNode: - def __init__(self, - agent_node: functools.partial, - name: str) -> None: - self.name = name - self.agent_node = agent_node - -class FloTeamChain(): - def __init__(self, name: str, chain) -> None: - self.chain = chain - self.name = name - -# this makes it so that the states of each graph don't get intermixed -def 
enter_chain(message: str, members: list[str]): - results = { - "messages": [HumanMessage(content=message)], - "team_members": ", ".join(members), - } - return results - - class FloRouter(ABC): def __init__(self, session: FloSession, name: str, flo_team: FloTeam, executor, config: RouterConfig = None): @@ -67,16 +40,9 @@ def build_agent_graph(): def build_team_graph(): pass - - def get_last_message(self, state: TeamFloAgentState, second = None) -> str: - return state["messages"][-1].content - - def join_graph(self, response: dict): - return {"messages": [response["messages"][-1]]} - def build_node(self, flo_agent: FloAgent): - agent_func = functools.partial(teamflo_agent_node, agent=flo_agent.executor, name=flo_agent.name) - return FloAgentNode(agent_func, flo_agent.name) + node_builder = FloNode.Builder() + return node_builder.build_from_agent(flo_agent) def router_fn(self, state: TeamFloAgentState): next = state["next"] @@ -88,11 +54,8 @@ def router_fn(self, state: TeamFloAgentState): return conditional_map[next] - def build_chain_for_teams(self, flo_team: FloRoutedTeam): - # TODO lets see if we can convert to members - return FloTeamChain(flo_team.name, ( - functools.partial(enter_chain, members=flo_team.graph.nodes) - | flo_team.graph - )) + def build_node_for_teams(self, flo_team: FloRoutedTeam): + node_builder = FloNode.Builder() + return node_builder.build_from_team(flo_team) \ No newline at end of file diff --git a/flo_ai/router/flo_supervisor.py b/flo_ai/router/flo_supervisor.py index c1d68713..98438909 100644 --- a/flo_ai/router/flo_supervisor.py +++ b/flo_ai/router/flo_supervisor.py @@ -1,8 +1,8 @@ from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser -from langchain.chains import LLMChain from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from typing import Union +from langchain_core.runnables import Runnable from flo_ai.state.flo_session 
import FloSession from flo_ai.constants.prompt_constants import FLO_FINISH from flo_ai.helpers.utils import randomize_name @@ -35,7 +35,7 @@ class FloSupervisor(FloRouter): def __init__(self, session: FloSession, - executor: LLMChain, + executor: Runnable, flo_team: FloTeam, name: str) -> None: super().__init__( @@ -49,7 +49,7 @@ def build_agent_graph(self): flo_agent_nodes = [self.build_node(flo_agent) for flo_agent in self.members] workflow = StateGraph(TeamFloAgentState) for flo_agent_node in flo_agent_nodes: - workflow.add_node(flo_agent_node.name, flo_agent_node.agent_node) + workflow.add_node(flo_agent_node.name, flo_agent_node.func) workflow.add_node(self.router_name, self.executor) for member in self.member_names: workflow.add_edge(member, self.router_name) @@ -59,12 +59,12 @@ def build_agent_graph(self): return FloRoutedTeam(self.flo_team.name, workflow_graph) def build_team_graph(self): - flo_team_entry_chains = [self.build_chain_for_teams(flo_agent) for flo_agent in self.members] + flo_team_entry_chains = [self.build_node_for_teams(flo_agent) for flo_agent in self.members] # Define the graph. 
super_graph = StateGraph(TeamFloAgentState) # First add the nodes, which will do the work for flo_team_chain in flo_team_entry_chains: - super_graph.add_node(flo_team_chain.name, self.get_last_message | flo_team_chain.chain | self.join_graph) + super_graph.add_node(flo_team_chain.name, flo_team_chain.func) super_graph.add_node(self.router_name, self.executor) for member in self.member_names: diff --git a/flo_ai/state/flo_session.py b/flo_ai/state/flo_session.py index a177ce79..f5203302 100644 --- a/flo_ai/state/flo_session.py +++ b/flo_ai/state/flo_session.py @@ -1,10 +1,12 @@ from langchain_core.language_models import BaseLanguageModel from langchain_core.tools import BaseTool +from langchain_core.runnables import Runnable class FloSession: def __init__(self, llm: BaseLanguageModel, loop_size: int = 2, max_loop: int = 3) -> None: self.llm = llm self.tools = dict() + self.executables = dict() self.counter = dict() self.navigation: list[str] = list() self.pattern_series = dict() @@ -14,6 +16,10 @@ def __init__(self, llm: BaseLanguageModel, loop_size: int = 2, max_loop: int = 3 def register_tool(self, name: str, tool: BaseTool): self.tools[name] = tool return self + + def register_executable(self, name: str, tool: Runnable): + self.executables[name] = dict() + return self def append(self, node: str) -> int: self.counter[node] = self.counter.get(node, 0) + 1 diff --git a/flo_ai/tools/flo_pdf_rag_tool.py b/flo_ai/tools/flo_pdf_rag_tool.py new file mode 100644 index 00000000..412ff194 --- /dev/null +++ b/flo_ai/tools/flo_pdf_rag_tool.py @@ -0,0 +1,17 @@ +from langchain.tools import Tool +from langchain_core.runnables import Runnable +from flo_ai.retrievers.flo_retriever import FloRagBuilder +from flo_ai.state.flo_session import FloSession +from flo_ai.retrievers.flo_compression_pipeline import FloCompressionPipeline +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_core.vectorstores import VectorStoreRetriever +from 
flo_ai.tools.flo_rag_retriver_tool import FloRagRetrieverTool + +class FloPDFRagTool(): + + def __init__(self) -> None: + raise ValueError("You are supposed to use Builder, FloRagRetriverTool.Builder()") + + class Builder(FloRagRetrieverTool.Builder): + def __init__(self) -> None: + super().__init__() \ No newline at end of file diff --git a/flo_ai/tools/flo_rag_retriver_tool.py b/flo_ai/tools/flo_rag_retriver_tool.py new file mode 100644 index 00000000..ddadbe51 --- /dev/null +++ b/flo_ai/tools/flo_rag_retriver_tool.py @@ -0,0 +1,57 @@ +from langchain.tools import Tool +from langchain_core.runnables import Runnable +from flo_ai.retrievers.flo_retriever import FloRagBuilder +from flo_ai.state.flo_session import FloSession +from flo_ai.retrievers.flo_compression_pipeline import FloCompressionPipeline +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_core.vectorstores import VectorStoreRetriever + +class FloRagRetrieverTool(): + + def __init__(self) -> None: + raise ValueError("You are supposed to use Builder, FloRagRetriverTool.Builder()") + + class Builder(): + + def __init__(self, + session: FloSession, + retriever: VectorStoreRetriever) -> None: + self.session = session + self.retriver = retriever + self.multiquery = False + self.custom_prompt: ChatPromptTemplate = None + self.extraction_pipeline: FloCompressionPipeline = None + + def add_custom_prompt(self, custom_prompt: ChatPromptTemplate): + self.custom_prompt = custom_prompt + + def add_custom_system_prompt(self, custom_system_prompt: str): + system_prompt = custom_system_prompt if custom_system_prompt is not None else """You are an assistant for question-answering tasks. + Use the following pieces of retrieved context to answer the question. + If you don't know the answer, just say that you don't know. 
+ Use three sentences maximum and keep the answer concise.""" + self.custom_prompt = ChatPromptTemplate.from_messages( + [ + ("system", system_prompt), + MessagesPlaceholder(variable_name="chat_history"), + ("human", "{question}"), + ] + ) + + def add_extraction_pipeline(self, pipeline: FloCompressionPipeline): + self.extraction_pipeline = pipeline + + def enable_multi_query(self): + self.multiquery = True + + def build(self, name: str, description: str) -> Tool: + rag_builder = FloRagBuilder(self.session, self.retriver) + if self.custom_prompt is not None: + rag_builder.with_prompt(self.custom_prompt) + if self.multiquery: + rag_builder.with_multi_query() + if self.extraction_pipeline is not None: + rag_builder.with_compression(self.extraction_pipeline) + return rag_builder.build_rag_tool(name=name, description=description) + + diff --git a/flo_ai/tools/flo_rag_tool.py b/flo_ai/tools/flo_rag_tool.py deleted file mode 100644 index 97b3944e..00000000 --- a/flo_ai/tools/flo_rag_tool.py +++ /dev/null @@ -1,39 +0,0 @@ -from langchain.pydantic_v1 import BaseModel, Field -from langchain.tools import Tool -from langchain_core.runnables import Runnable -from functools import partial - -class FloRagToolInput(BaseModel): - query: str = Field(description="query to look up in retriever") - -def __get_rag_answer(query: str, runnable: Runnable): - result = runnable.invoke({ "question": query }) - return result["answer"].content - -async def __aget_rag_answer(query: str, runnable: Runnable): - result = await runnable.ainvoke({ "question": query }) - return result["answer"].content - -def create_flo_rag_tool( - runnable_rag: Runnable, - name: str, - description: str -) -> Tool: - func = partial( - __get_rag_answer, - runnable=runnable_rag - ) - - afunc = partial( - __aget_rag_answer, - runnable=runnable_rag - ) - - return Tool( - name=name, - description=description, - func=func, - coroutine=afunc, - args_schema=FloRagToolInput, - ) - diff --git a/flo_ai/yaml/flo_team_builder.py 
b/flo_ai/yaml/flo_team_builder.py index 69bc3e37..479b103d 100644 --- a/flo_ai/yaml/flo_team_builder.py +++ b/flo_ai/yaml/flo_team_builder.py @@ -33,7 +33,7 @@ class AgentConfig(BaseModel): name: str role: Optional[str] = None kind: Optional[str] = None - job: str + job: Optional[str] = None tools: List[ToolConfig] = [] class RouterConfig(BaseModel): @@ -48,6 +48,7 @@ class PlannerConfig(BaseModel): class TeamConfig(BaseModel): name: str + kind: Optional[str] = None agents: Optional[List[AgentConfig]] = None subteams: Optional[List['TeamConfig']] = None router: Optional[RouterConfig] = None @@ -61,7 +62,7 @@ class FloRoutedTeamConfig(BaseModel): class FloAgentConfig(BaseModel): apiVersion: str - kind: str + kind: Optional[str] = None name: str agent: AgentConfig