Skip to content
2 changes: 1 addition & 1 deletion ROADMAP.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ Core features improve the library itself to cater wider range of functionalities
| Name | Description | Status | Release version |
|------|-------------|--------|-----------------|
|Linear Router|A router that lets you build agents or teams that execute linearly or sequentially. The current router supervisor works in a hierarchical way where all the children report to one parent| In progress | 0.0.3|
|Reflexion| Reflection lets you build a component that can make the AI retrospectively look at the current output and retry or work again on the task at hand| Yet to start| 0.0.3|
|Reflection| Reflection lets you build a component that can make the AI retrospectively look at the current output and retry or work again on the task at hand| Yet to start| 0.0.3|
|Output formatter| Ability to templatize output format using pydantic| Yet to start| 0.0.4|
|LLM Extensibility| Ability to use different LLMs across different agents and teams| Yet to start| 0.0.4|
|Auto-Query RAG| Ability to make metadata queries within the agentic flow, automatically adding metadata such as timestamps or other data while the RAG query runs|Yet to start|TBD|
Expand Down
752 changes: 265 additions & 487 deletions examples/agent_of_flo_ai.ipynb

Large diffs are not rendered by default.

251 changes: 251 additions & 0 deletions examples/agentic_rag.ipynb

Large diffs are not rendered by default.

51 changes: 51 additions & 0 deletions examples/delegator_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
from flo_ai.core import Flo
from flo_ai import FloSession
from langchain_openai import ChatOpenAI
from langchain_community.tools.tavily_search.tool import TavilySearchResults
from dotenv import load_dotenv

# Load OPENAI_API_KEY / TAVILY_API_KEY from a local .env file.
load_dotenv()

# A linear team: the writer drafts an essay, the delegator grades it and
# either sends it back to the writer for a revision or forwards it to the
# final producer.  Prompt typos fixed ("sent" -> "send", "assay" -> "essay").
yaml_data = """
apiVersion: flo/alpha-v1
kind: FloRoutedTeam
name: adding-team
team:
    name: EssayTeam
    agents:
      - name: EssayWriter
        kind: llm
        job: >
          You are an essay assistant tasked with writing excellent 300 words essays. Generate the best essay possible for the user's request.
          If you are provided a critique, respond with a revised version of your previous attempts. A maximum of total 100 words
      - name: DelegatorAgent
        kind: delegator
        retry: 1
        to:
          - name: EssayWriter
        job: >
          You are a teacher grading an essay submission. Score the essay between 1 to 10, with 10 being perfect.
          If the score is greater than 7 send it to FinalEssayProducer,
          else if it is less than or equal to 7 send it to EssayWriter with suggestions to change
      - name: FinalEssayProducer
        kind: llm
        job: >
          Generate the final essay to be returned to the user
    router:
        name: router
        kind: linear
"""

input_prompt = """
Question: Write me an interesting blog about latest advancements in agentic AI by researching the internet
"""

llm = ChatOpenAI(temperature=0, model_name='gpt-4o-mini')
# NOTE(review): the team YAML above does not reference this tool; it is
# registered so the agents could be extended to use it -- confirm it is needed.
session = FloSession(llm).register_tool(
    name="TavilySearchResults",
    tool=TavilySearchResults()
)

flo: Flo = Flo.build(session, yaml=yaml_data)
flo.draw_to_file("delegate.png", xray=True)
# Uncomment to actually run the team (requires valid API keys):
# data = flo.invoke(input_prompt)
# print((data['messages'][-1]).content)
16 changes: 8 additions & 8 deletions examples/linear_router_team.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,14 +42,14 @@ async def _arun(
name: data-processing-pipline
kind: linear
agents:
- name: Reasercher
job: Do a research on the internet and find articles of relevent to the topic asked by the user, always try to find the latest information on the same
tools:
- name: TavilySearchResults
- name: Blogger
job: From the documents provider by the researcher write a blog of 300 words with can be readily published, make in engaging and add reference links to original blogs
tools:
- name: TavilySearchResults
- name: Reasercher
job: Do a research on the internet and find articles of relevent to the topic asked by the user, always try to find the latest information on the same
tools:
- name: TavilySearchResults
- name: Blogger
job: From the documents provider by the researcher write a blog of 300 words with can be readily published, make in engaging and add reference links to original blogs
tools:
- name: TavilySearchResults
"""

input_prompt = """
Expand Down
70 changes: 70 additions & 0 deletions examples/rag_tool.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
from flo_ai import Flo
from flo_ai import FloSession
from flo_ai.retrievers.flo_retriever import FloRagBuilder
from flo_ai.retrievers.flo_compression_pipeline import FloCompressionPipeline
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_chroma import Chroma
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
    SentenceTransformerEmbeddings,
)
from langchain_text_splitters import CharacterTextSplitter
from dotenv import load_dotenv

# Load OPENAI_API_KEY from a local .env file.
load_dotenv()

# Single LLM/session for the whole example.  (The original created a second,
# throw-away llm/session pair mid-file; the first pair was dead code.)
llm = ChatOpenAI(temperature=0, model_name='gpt-4o-mini')
session = FloSession(llm)

# Load the source document and split it into ~1000-character chunks.
loader = TextLoader("./examples/rag_document.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)

# Embed the chunks with an open-source sentence-transformer model and index
# them in an in-memory Chroma vector store.
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma.from_documents(docs, embedding_function)

# Build a RAG tool whose retriever output is compressed: redundant and
# irrelevant chunks are filtered out (by embedding similarity) before they
# reach the LLM.
builder = FloRagBuilder(session, db.as_retriever())
compression_pipeline = FloCompressionPipeline(
    OpenAIEmbeddings(model="text-embedding-3-small")
)
compression_pipeline.add_embedding_reduntant_filter()
compression_pipeline.add_embedding_relevant_filter()

retriever_tool = builder.with_compression(compression_pipeline).build_rag_tool(
    name="HousingLoanRetreiver",
    description="Tool to fetch data around housing loans"
)
session.register_tool(name="HousingLoanTool", tool=retriever_tool)

# A single agent that answers questions using the registered RAG tool.
simple_tool_agent = """
apiVersion: flo/alpha-v1
kind: FloAgent
name: llm-assistant
agent:
    name: tool-get-loan
    kind: agentic
    job: To retrieve and answer user questions
    tools:
      - name: HousingLoanTool
"""

flo = Flo.build(session, simple_tool_agent)

print(flo.invoke("Whats interest rate on loan"))
7 changes: 6 additions & 1 deletion examples/rag_with_reranking.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,10 +47,15 @@
]
)

from langchain.schema import BaseMessage
compression_pipeline = FloCompressionPipeline(OpenAIEmbeddings(model="text-embedding-3-small"))
compression_pipeline.add_embedding_reduntant_filter()
compression_pipeline.add_embedding_relevant_filter()

rag = rag_builder.with_prompt(custom_prompt).with_multi_query().with_compression(compression_pipeline).build_rag()
rag = rag_builder.with_prompt(
custom_prompt
).with_multi_query().with_compression(
compression_pipeline
).build_rag()
print(rag.invoke({ "question": "What are the documents applying for housing loan" }))

50 changes: 50 additions & 0 deletions examples/reflection_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
from flo_ai.core import Flo
from flo_ai import FloSession
from langchain_openai import ChatOpenAI
from langchain_community.tools.tavily_search.tool import TavilySearchResults
from dotenv import load_dotenv

# Load OPENAI_API_KEY / TAVILY_API_KEY from a local .env file.
load_dotenv()

# Reflection pattern: the writer drafts, the reflection agent critiques the
# draft and routes it back to the writer (up to `retry` times), and the final
# producer emits the answer.  Prompt typos fixed ("assay" -> "essay").
yaml_data = """
apiVersion: flo/alpha-v1
kind: FloRoutedTeam
name: adding-team
team:
    name: EssayTeam
    agents:
      - name: EssayWriter
        kind: llm
        job: >
          You are an essay assistant tasked with writing excellent 300-words essays. Generate the best essay possible for the user's request.
          If you are provided a critique, respond with a revised version of your previous attempts. A maximum of total 100 words
      - name: ReflectionAgent
        kind: reflection
        retry: 1
        to:
          - name: EssayWriter
        job: >
          You are a teacher grading an essay submission. Generate critique and recommendations for the user's submission.
          Provide detailed recommendations, including requests for length, depth, style, etc.
      - name: FinalEssayProducer
        kind: llm
        job: >
          Generate the final essay to be returned to the user
    router:
        name: router
        kind: linear
"""

input_prompt = """
Question: Write me an interesting blog about latest advancements in agentic AI by researching the internet
"""

llm = ChatOpenAI(temperature=0, model_name='gpt-4o-mini')
# NOTE(review): the team YAML above does not reference this tool -- confirm
# it is actually needed for this example.
session = FloSession(llm).register_tool(
    name="TavilySearchResults",
    tool=TavilySearchResults()
)

flo: Flo = Flo.build(session, yaml=yaml_data)
flo.draw_to_file("event.png", xray=True)
data = flo.invoke(input_prompt)
print((data['messages'][-1]).content)
54 changes: 54 additions & 0 deletions examples/tool_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
from flo_ai import Flo
from flo_ai import FloSession
from langchain_openai import ChatOpenAI
from langchain.tools import BaseTool
from dotenv import load_dotenv

# Load OPENAI_API_KEY from a local .env file.
load_dotenv()

llm = ChatOpenAI(temperature=0, model_name='gpt-4o-mini')

session = FloSession(
    llm,
    log_level="ERROR"
)


class PrintStateTool(BaseTool):
    # Minimal example tool: ignores all arguments and returns a fixed string.
    name = "printStateTool"
    description = "Just print the state"

    def _run(self, **kwargs) -> str:
        """Return a fixed success message; the tool has no side effects."""
        return "Print tool call success"


session.register_tool(
    name="printStateTool",
    tool=PrintStateTool()
)

# A single-member linear team whose only agent is a tool agent that invokes
# printStateTool.
simple_tool_agent = """
apiVersion: flo/alpha-v1
kind: FloRoutedTeam
name: llm-assistant
team:
    name: tool-to-print-state
    router:
        name: LinearRouter
        kind: linear
    agents:
      - name: tool-to-print
        kind: tool
        tools:
          - name: printStateTool
"""

flo = Flo.build(session, simple_tool_agent)

print(flo.invoke("Testing ...."))
38 changes: 10 additions & 28 deletions flo_ai/builders/yaml_builder.py
Original file line number Diff line number Diff line change
@@ -1,53 +1,35 @@
from flo_ai.models.flo_team import FloTeam
from flo_ai.models.flo_agent import FloAgent
from flo_ai.yaml.config import (FloRoutedTeamConfig, TeamConfig, AgentConfig, FloAgentConfig)
from flo_ai.models.flo_executable import ExecutableFlo
from flo_ai.models.flo_planner import FloPlannerBuilder
from flo_ai.state.flo_session import FloSession
from flo_ai.router.flo_router_factory import FloRouterFactory
from flo_ai.factory.agent_factory import AgentFactory
from typing import Union


def build_supervised_team(session: FloSession) -> ExecutableFlo:
    """Build an executable Flo from the config carried on the session.

    Dispatches on the config type: a routed-team config is built recursively
    via `parse_and_build_subteams`, a plain agent config via `AgentFactory`.
    NOTE(review): a config of any other type falls through and returns
    None implicitly -- confirm upstream validation guarantees the type.
    """
    flo_config = session.config
    if isinstance(flo_config, FloRoutedTeamConfig):
        team_config: TeamConfig = flo_config.team
        return parse_and_build_subteams(session, team_config)
    elif isinstance(flo_config, FloAgentConfig):
        agent_config: AgentConfig = flo_config.agent
        return AgentFactory.create(session, agent_config)


def parse_and_build_subteams(session: FloSession, team_config: TeamConfig) -> ExecutableFlo:
    """Recursively build a routed team from `team_config`.

    A leaf team (one with `agents`) gets its members from the agent factory;
    a composite team builds each subteam first.  In both cases the members
    are wrapped in a FloTeam and handed to the router factory, which wires
    up the routing graph.
    """
    if team_config.agents:
        members = [AgentFactory.create(session, agent) for agent in team_config.agents]
    else:
        members = [parse_and_build_subteams(session, subteam) for subteam in team_config.subteams]
    flo_team = FloTeam.Builder(team_config, members=members).build()
    router = FloRouterFactory.create(session, team_config, flo_team)
    return router.build_routed_team()
Expand Down
18 changes: 9 additions & 9 deletions flo_ai/common/flo_langchain_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,40 +14,40 @@ def __init__(self,
self.session_id = session_id

# LangChain callback hooks.  Chatty lifecycle events (starts, ends, tokens,
# text, agent actions) are logged at DEBUG; genuine failures stay at ERROR.
# Every message is prefixed with the session id so concurrent sessions can
# be disentangled in shared log output.
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
    self.logger.debug(f"Session ID: {self.session_id}: onLLMStart: {prompts}")

def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
    self.logger.debug(f"Session ID: {self.session_id}: onNewToken: {token}")

def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
    self.logger.debug(f"Session ID: {self.session_id}: onLLMEnd: {response.generations}")

def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
    self.logger.error(f"Session ID: {self.session_id}: onLLMError: {error}")

def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> None:
    self.logger.debug(f"Session ID: {self.session_id}: onChainStart: {inputs}")

def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
    self.logger.debug(f"Session ID: {self.session_id}: onChainEnd: {outputs}")

def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
    self.logger.error(f"Session ID: {self.session_id}: onChainError: {error}")

def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> None:
    self.logger.debug(f"Session ID: {self.session_id}: onToolStart: {input_str}")

def on_tool_end(self, output: str, **kwargs: Any) -> None:
    self.logger.debug(f"Session ID: {self.session_id}: onToolEnd: {output}")

def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
    self.logger.error(f"Session ID: {self.session_id}: onToolError: {error}")

def on_text(self, text: str, **kwargs: Any) -> None:
    self.logger.debug(f"Session ID: {self.session_id}: onText: {text}")

def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
    self.logger.debug(f"Session ID: {self.session_id}: onAgentAction: {action.tool} - {action.tool_input}")

def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
    self.logger.debug(f"Session ID: {self.session_id}: onAgentFinish: {finish.return_values}")
Loading