diff --git a/basics/hitl/overview.mdx b/basics/hitl/overview.mdx index 3248909c..54ffdcd2 100644 --- a/basics/hitl/overview.mdx +++ b/basics/hitl/overview.mdx @@ -1,6 +1,9 @@ --- title: Human-in-the-Loop in Agents -description: Learn how to control the flow of an agent's execution in Agno. +sidebarTitle: Overview +description: Learn how to control the flow of an agent's execution in Agno with human oversight and input. +keywords: [human-in-the-loop, HITL, user confirmation, user input, external execution, control flow] +mode: wide --- Human-in-the-Loop (HITL) in Agno enable you to implement patterns where human oversight and input are required during agent execution. This is crucial for: @@ -404,8 +407,41 @@ if run_response.is_paused: 2. **Error Handling**: Implement proper error handling for user input and external calls 3. **Input Validation**: Validate user input before processing +## Learn More + + + + Require explicit user approval before executing tool calls + + + Gather specific information from users during execution + + + Let agents request user input dynamically when needed + + + Execute tools outside of the agent's control + + + ## Developer Resources -- View more [Examples](/examples/basics/agent/human_in_the_loop/) +- View more [Examples](/examples/basics/agent/hitl/) - View [Cookbook](https://github.com/agno-agi/agno/tree/main/cookbook/agents/human_in_the_loop) diff --git a/docs.json b/docs.json index 97ab1a74..fee10ca3 100644 --- a/docs.json +++ b/docs.json @@ -1310,29 +1310,40 @@ "pages": [ "basics/hitl/overview", { - "group": "Examples", + "group": "Usage", "pages": [ - "examples/basics/agent/human_in_the_loop/agentic_user_input", - "examples/basics/agent/human_in_the_loop/confirmation_required", - "examples/basics/agent/human_in_the_loop/confirmation_required_async", - "examples/basics/agent/human_in_the_loop/confirmation_required_mixed_tools", - "examples/basics/agent/human_in_the_loop/confirmation_required_multiple_tools", - "examples/basics/agent/human_in_the_loop/confirmation_required_stream", - "examples/basics/agent/human_in_the_loop/confirmation_required_stream_async", - "examples/basics/agent/human_in_the_loop/confirmation_required_toolkit", - "examples/basics/agent/human_in_the_loop/confirmation_required_with_history", - "examples/basics/agent/human_in_the_loop/confirmation_required_with_run_id", - "examples/basics/agent/human_in_the_loop/external_tool_execution", - "examples/basics/agent/human_in_the_loop/external_tool_execution_async", - "examples/basics/agent/human_in_the_loop/external_tool_execution_async_responses", - "examples/basics/agent/human_in_the_loop/external_tool_execution_stream", - "examples/basics/agent/human_in_the_loop/external_tool_execution_stream_async", - "examples/basics/agent/human_in_the_loop/external_tool_execution_toolkit", - "examples/basics/agent/human_in_the_loop/user_input_required", - "examples/basics/agent/human_in_the_loop/user_input_required_all_fields", - "examples/basics/agent/human_in_the_loop/user_input_required_async", - "examples/basics/agent/human_in_the_loop/user_input_required_stream", - "examples/basics/agent/human_in_the_loop/user_input_required_stream_async" + "examples/basics/agent/hitl/agentic-user-input", + { + "group": "Confirmation Required", + "pages": [ + "examples/basics/agent/hitl/confirmation-required", + "examples/basics/agent/hitl/confirmation-required-async", + "examples/basics/agent/hitl/confirmation-required-mixed-tools", + "examples/basics/agent/hitl/confirmation-required-multiple-tools", + 
"examples/basics/agent/hitl/confirmation-required-stream-async", + "examples/basics/agent/hitl/confirmation-required-toolkit", + "examples/basics/agent/hitl/confirmation-required-with-history", + "examples/basics/agent/hitl/confirmation-required-with-run-id" + ] + }, + { + "group": "User Input Required", + "pages": [ + "examples/basics/agent/hitl/user-input-required", + "examples/basics/agent/hitl/user-input-required-all-fields", + "examples/basics/agent/hitl/user-input-required-async", + "examples/basics/agent/hitl/user-input-required-stream-async" + ] + }, + { + "group": "External Tool Execution", + "pages": [ + "examples/basics/agent/hitl/external-tool-execution", + "examples/basics/agent/hitl/external-tool-execution-async", + "examples/basics/agent/hitl/external-tool-execution-stream-async", + "examples/basics/agent/hitl/external-tool-execution-toolkit" + ] + } ] } ] diff --git a/examples/basics/agent/hitl/agentic-user-input.mdx b/examples/basics/agent/hitl/agentic-user-input.mdx new file mode 100644 index 00000000..8656942f --- /dev/null +++ b/examples/basics/agent/hitl/agentic-user-input.mdx @@ -0,0 +1,182 @@ +--- +title: Agentic User Input with Control Flow +sidebarTitle: Agentic User Input +keywords: [UserControlFlowTools, dynamic user input, agentic input, form-like interaction, control flow] +description: This example demonstrates how to use UserControlFlowTools to allow agents to dynamically request user input when they need additional information to complete tasks. +mode: wide +--- + + + + + Create a Python file for the example. + ```bash + touch agentic_user_input.py + ``` + + + + ```python agentic_user_input.py + from typing import Any, Dict, List + + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import Toolkit + from agno.tools.function import UserInputField + from agno.tools.user_control_flow import UserControlFlowTools + from agno.utils import pprint + + + class EmailTools(Toolkit): + def __init__(self, *args, **kwargs): + super().__init__( + name="EmailTools", tools=[self.send_email, self.get_emails], *args, **kwargs + ) + + def send_email(self, subject: str, body: str, to_address: str) -> str: + """Send an email to the given address with the given subject and body. + + Args: + subject (str): The subject of the email. + body (str): The body of the email. + to_address (str): The address to send the email to. + """ + return f"Sent email to {to_address} with subject {subject} and body {body}" + + def get_emails(self, date_from: str, date_to: str) -> list[dict[str, str]]: + """Get all emails between the given dates. + + Args: + date_from (str): The start date (in YYYY-MM-DD format). + date_to (str): The end date (in YYYY-MM-DD format). 
+ """ + return [ + { + "subject": "Hello", + "body": "Hello, world!", + "to_address": "test@test.com", + "date": date_from, + }, + { + "subject": "Random other email", + "body": "This is a random other email", + "to_address": "john@doe.com", + "date": date_to, + }, + ] + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[EmailTools(), UserControlFlowTools()], + markdown=True, + ) + + run_response = agent.run("Send an email with the body 'What is the weather in Tokyo?'") + + # We use a while loop to continue the running until the agent is satisfied with the user input + while run_response.is_paused: + for tool in run_response.tools_requiring_user_input: + input_schema: List[UserInputField] = tool.user_input_schema # type: ignore + + for field in input_schema: + # Get user input for each field in the schema + field_type = field.field_type # type: ignore + field_description = field.description # type: ignore + + # Display field information to the user + print(f"\nField: {field.name}") # type: ignore + print(f"Description: {field_description}") + print(f"Type: {field_type}") + + # Get user input + if field.value is None: # type: ignore + user_value = input(f"Please enter a value for {field.name}: ") # type: ignore + else: + print(f"Value: {field.value}") # type: ignore + user_value = field.value # type: ignore + + # Update the field value + field.value = user_value # type: ignore + + run_response = agent.continue_run(run_response=run_response) + if not run_response.is_paused: + pprint.pprint_run_response(run_response) + break + + + run_response = agent.run("Get me all my emails") + + while run_response.is_paused: + for tool in run_response.tools_requiring_user_input: + input_schema: Dict[str, Any] = tool.user_input_schema # type: ignore + + for field in input_schema: + # Get user input for each field in the schema + field_type = field.field_type # type: ignore + field_description = field.description # type: ignore + + # Display field information to the user + print(f"\nField: {field.name}") # type: ignore + print(f"Description: {field_description}") + print(f"Type: {field_type}") + + # Get user input + if field.value is None: # type: ignore + user_value = input(f"Please enter a value for {field.name}: ") # type: ignore + else: + print(f"Value: {field.value}") # type: ignore + user_value = field.value # type: ignore + + # Update the field value + field.value = user_value # type: ignore + + run_response = agent.continue_run(run_response=run_response) + if not run_response.is_paused: + pprint.pprint_run_response(run_response) + break + ``` + + + + + + ```bash + pip install -U agno openai + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python agentic_user_input.py + ``` + + ```bash Windows + python agentic_user_input.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. 
Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/confirmation-required-async.mdx b/examples/basics/agent/hitl/confirmation-required-async.mdx new file mode 100644 index 00000000..97bd4f88 --- /dev/null +++ b/examples/basics/agent/hitl/confirmation-required-async.mdx @@ -0,0 +1,140 @@ +--- +title: Async Tool Confirmation Required +sidebarTitle: Async Confirmation +keywords: [async, asynchronous, confirmation, acontinue_run, arun] +description: This example demonstrates how to implement human-in-the-loop functionality with async agents, requiring user confirmation before executing tool operations. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch confirmation_required_async.py + ``` + + + + + ```python confirmation_required_async.py + import asyncio + import json + import httpx + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.utils import pprint + from rich.console import Console + from rich.prompt import Prompt + + console = Console() + + + @tool(requires_confirmation=True) + def get_top_hackernews_stories(num_stories: int) -> str: + """Fetch top stories from Hacker News. + + Args: + num_stories (int): Number of stories to retrieve + + Returns: + str: JSON string containing story details + """ + # Fetch top story IDs + response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") + story_ids = response.json() + + # Yield story details + all_stories = [] + for story_id in story_ids[:num_stories]: + story_response = httpx.get( + f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" + ) + story = story_response.json() + if "text" in story: + story.pop("text", None) + all_stories.append(story) + return json.dumps(all_stories) + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[get_top_hackernews_stories], + markdown=True, + ) + + run_response = asyncio.run(agent.arun("Fetch the top 2 hackernews stories")) + if run_response.is_paused: + for tool in run_response.tools_requiring_confirmation: + # Ask for confirmation + console.print( + f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." + ) + message = ( + Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") + .strip() + .lower() + ) + + if message == "n": + tool.confirmed = False + else: + # We update the tools in place + tool.confirmed = True + + + run_response = asyncio.run(agent.acontinue_run(run_response=run_response)) + # Or + # run_response = asyncio.run(agent.acontinue_run(run_id=run_response.run_id)) + + pprint.pprint_run_response(run_response) + + + # Or for simple debug flow + # asyncio.run(agent.aprint_response("Fetch the top 2 hackernews stories")) + ``` + + + + + + ```bash + pip install -U agno openai httpx rich + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python confirmation_required_async.py + ``` + + ```bash Windows + python confirmation_required_async.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. 
Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/confirmation-required-mixed-tools.mdx b/examples/basics/agent/hitl/confirmation-required-mixed-tools.mdx new file mode 100644 index 00000000..7e628247 --- /dev/null +++ b/examples/basics/agent/hitl/confirmation-required-mixed-tools.mdx @@ -0,0 +1,152 @@ +--- +title: Confirmation Required with Mixed Tools +sidebarTitle: Mixed Tools Confirmation +keywords: [confirmation, mixed tools, selective confirmation, partial execution] +description: This example demonstrates human-in-the-loop functionality where only some tools require user confirmation. The agent executes tools that don't require confirmation automatically and pauses only for tools that need approval. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch confirmation_required_mixed_tools.py + ``` + + + + + ```python confirmation_required_mixed_tools.py + import json + import httpx + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.utils import pprint + from rich.console import Console + from rich.prompt import Prompt + + console = Console() + + + def get_top_hackernews_stories(num_stories: int) -> str: + """Fetch top stories from Hacker News. + + Args: + num_stories (int): Number of stories to retrieve + + Returns: + str: JSON string containing story details + """ + # Fetch top story IDs + response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") + story_ids = response.json() + + # Yield story details + all_stories = [] + for story_id in story_ids[:num_stories]: + story_response = httpx.get( + f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" + ) + story = story_response.json() + if "text" in story: + story.pop("text", None) + all_stories.append(story) + return json.dumps(all_stories) + + + @tool(requires_confirmation=True) + def send_email(to: str, subject: str, body: str) -> str: + """Send an email. + + Args: + to (str): Email address to send to + subject (str): Subject of the email + body (str): Body of the email + """ + return f"Email sent to {to} with subject {subject} and body {body}" + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[get_top_hackernews_stories, send_email], + markdown=True, + ) + + run_response = agent.run( + "Fetch the top 2 hackernews stories and email them to john@doe.com." + ) + if run_response.is_paused: + for tool in run_response.tools: # type: ignore + if tool.requires_confirmation: + # Ask for confirmation + console.print( + f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." + ) + message = ( + Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") + .strip() + .lower() + ) + + if message == "n": + tool.confirmed = False + else: + # We update the tools in place + tool.confirmed = True + else: + console.print( + f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] was completed in [bold green]{tool.metrics.duration:.2f}[/] seconds." 
# type: ignore
+            )
+
+run_response = agent.continue_run(run_response=run_response)
+pprint.pprint_run_response(run_response)
+
+# Or for simple debug flow
+# agent.print_response("Fetch the top 2 hackernews stories")
+```
+
+
+
+
+
+```bash
+pip install -U agno openai httpx rich
+```
+
+
+
+
+
+
+```bash Mac/Linux
+export OPENAI_API_KEY="your_openai_api_key_here"
+```
+
+```bash Windows
+$Env:OPENAI_API_KEY="your_openai_api_key_here"
+```
+
+
+
+
+
+```bash Mac
+python confirmation_required_mixed_tools.py
+```
+
+```bash Windows
+python confirmation_required_mixed_tools.py
+```
+
+
+
+
+Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub:
+
+
+Agno Cookbooks on GitHub
+
+
diff --git a/examples/basics/agent/hitl/confirmation-required-multiple-tools.mdx b/examples/basics/agent/hitl/confirmation-required-multiple-tools.mdx
new file mode 100644
index 00000000..bc804dc2
--- /dev/null
+++ b/examples/basics/agent/hitl/confirmation-required-multiple-tools.mdx
@@ -0,0 +1,140 @@
+---
+title: Confirmation Required with Multiple Tools
+sidebarTitle: Multiple Tools Confirmation
+keywords: [confirmation, multiple tools, wikipedia, loop confirmation]
+description: This example demonstrates human-in-the-loop functionality with multiple tools that require confirmation. It shows how to handle user confirmation during tool execution and gracefully cancel operations based on user choice.
+mode: wide
+---
+
+
+
+
+Create a Python file for the example.
+```bash
+touch confirmation_required_multiple_tools.py
+```
+
+
+
+
+```python confirmation_required_multiple_tools.py
+import json
+import httpx
+from agno.agent import Agent
+from agno.models.openai import OpenAIChat
+from agno.tools import tool
+from agno.tools.wikipedia import WikipediaTools
+from agno.utils import pprint
+from rich.console import Console
+from rich.prompt import Prompt
+
+console = Console()
+
+
+@tool(requires_confirmation=True)
+def get_top_hackernews_stories(num_stories: int) -> str:
+    """Fetch top stories from Hacker News.
+
+    Args:
+        num_stories (int): Number of stories to retrieve
+
+    Returns:
+        str: JSON string containing story details
+    """
+    # Fetch top story IDs
+    response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json")
+    story_ids = response.json()
+
+    # Yield story details
+    all_stories = []
+    for story_id in story_ids[:num_stories]:
+        story_response = httpx.get(
+            f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json"
+        )
+        story = story_response.json()
+        if "text" in story:
+            story.pop("text", None)
+        all_stories.append(story)
+    return json.dumps(all_stories)
+
+
+agent = Agent(
+    model=OpenAIChat(id="gpt-5-mini"),
+    tools=[
+        get_top_hackernews_stories,
+        WikipediaTools(requires_confirmation_tools=["search_wikipedia"]),
+    ],
+    markdown=True,
+)
+
+run_response = agent.run(
+    "Fetch 2 articles about the topic 'python'. You can choose which source to use, but only use one source."
+)
+while run_response.is_paused:
+    for tool in run_response.tools_requiring_confirmation:  # type: ignore
+        # Ask for confirmation
+        console.print(
+            f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation."
+        )
+        message = (
+            Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y")
+            .strip()
+            .lower()
+        )
+
+        if message == "n":
+            tool.confirmed = False
+            tool.confirmation_note = (
+                "This is not the right tool to use. Use the other tool!"
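+                # The note is passed back to the agent as feedback on why the call was rejected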
+ ) + else: + # We update the tools in place + tool.confirmed = True + + run_response = agent.continue_run(run_response=run_response) + pprint.pprint_run_response(run_response) + ``` + + + + + + ```bash + pip install -U agno openai httpx rich + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python confirmation_required_multiple_tools.py + ``` + + ```bash Windows + python confirmation_required_multiple_tools.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/confirmation-required-stream-async.mdx b/examples/basics/agent/hitl/confirmation-required-stream-async.mdx new file mode 100644 index 00000000..afbd83d1 --- /dev/null +++ b/examples/basics/agent/hitl/confirmation-required-stream-async.mdx @@ -0,0 +1,148 @@ +--- +title: Confirmation Required with Async Streaming +sidebarTitle: Stream Confirmation +keywords: [confirmation, async streaming, arun stream, real-time async] +description: This example demonstrates human-in-the-loop functionality with asynchronous streaming responses. It shows how to handle user confirmation during tool execution in an async environment while maintaining real-time streaming. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch confirmation_required_stream_async.py + ``` + + + + + ```python confirmation_required_stream_async.py + import asyncio + import json + import httpx + from agno.agent import Agent + from agno.db.sqlite import SqliteDb + from agno.models.openai import OpenAIChat + from agno.tools import tool + from rich.console import Console + from rich.prompt import Prompt + + console = Console() + + + @tool(requires_confirmation=True) + def get_top_hackernews_stories(num_stories: int) -> str: + """Fetch top stories from Hacker News. + + Args: + num_stories (int): Number of stories to retrieve + + Returns: + str: JSON string containing story details + """ + # Fetch top story IDs + response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") + story_ids = response.json() + + # Yield story details + all_stories = [] + for story_id in story_ids[:num_stories]: + story_response = httpx.get( + f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" + ) + story = story_response.json() + if "text" in story: + story.pop("text", None) + all_stories.append(story) + return json.dumps(all_stories) + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[get_top_hackernews_stories], + db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), + markdown=True, + ) + + + async def main(): + async for run_event in agent.arun( + "Fetch the top 2 hackernews stories", stream=True + ): + if run_event.is_paused: + for tool in run_event.tools_requiring_confirmation: # type: ignore + # Ask for confirmation + console.print( + f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." 
+ ) + message = ( + Prompt.ask( + "Do you want to continue?", choices=["y", "n"], default="y" + ) + .strip() + .lower() + ) + + if message == "n": + tool.confirmed = False + else: + # We update the tools in place + tool.confirmed = True + + async for resp in agent.acontinue_run( # type: ignore + run_id=run_event.run_id, updated_tools=run_event.tools, stream=True + ): + print(resp.content, end="") + + # Or for simple debug flow + # await agent.aprint_response("Fetch the top 2 hackernews stories", stream=True) + + + if __name__ == "__main__": + asyncio.run(main()) + ``` + + + + + + ```bash + pip install -U agno openai httpx rich + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python confirmation_required_stream_async.py + ``` + + ```bash Windows + python confirmation_required_stream_async.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/confirmation-required-toolkit.mdx b/examples/basics/agent/hitl/confirmation-required-toolkit.mdx new file mode 100644 index 00000000..6e0bb65d --- /dev/null +++ b/examples/basics/agent/hitl/confirmation-required-toolkit.mdx @@ -0,0 +1,102 @@ +--- +title: Confirmation Required with Toolkit +sidebarTitle: Confirmation in Toolkit +keywords: [confirmation, toolkit, duckduckgo, pre-built tools] +description: This example demonstrates human-in-the-loop functionality using toolkit-based tools that require confirmation. It shows how to handle user confirmation when working with pre-built tool collections like DuckDuckGoTools. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch confirmation_required_toolkit.py + ``` + + + + + ```python confirmation_required_toolkit.py + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.tools.duckduckgo import DuckDuckGoTools + from agno.utils import pprint + from rich.console import Console + from rich.prompt import Prompt + + console = Console() + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[DuckDuckGoTools(requires_confirmation_tools=["get_current_stock_price"])], + markdown=True, + ) + + run_response = agent.run("What is the current stock price of Apple?") + if run_response.is_paused: # Or agent.run_response.is_paused + for tool in run_response.tools_requiring_confirmation: # type: ignore + # Ask for confirmation + console.print( + f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." + ) + message = ( + Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") + .strip() + .lower() + ) + + if message == "n": + tool.confirmed = False + else: + # We update the tools in place + tool.confirmed = True + + run_response = agent.continue_run(run_response=run_response) + pprint.pprint_run_response(run_response) + ``` + + + + + + ```bash + pip install -U agno openai ddgs rich + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python confirmation_required_toolkit.py + ``` + + ```bash Windows + python confirmation_required_toolkit.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. 
Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/confirmation-required-with-history.mdx b/examples/basics/agent/hitl/confirmation-required-with-history.mdx new file mode 100644 index 00000000..85e0d6d8 --- /dev/null +++ b/examples/basics/agent/hitl/confirmation-required-with-history.mdx @@ -0,0 +1,135 @@ +--- +title: Confirmation Required with History +sidebarTitle: Confirmation with History +keywords: [confirmation, history, conversation context, add_history_to_context] +description: This example demonstrates human-in-the-loop functionality while maintaining conversation history. It shows how user confirmation works when the agent has access to previous conversation context. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch confirmation_required_with_history.py + ``` + + + + + ```python confirmation_required_with_history.py + import json + import httpx + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.utils import pprint + from rich.console import Console + from rich.prompt import Prompt + + console = Console() + + + @tool(requires_confirmation=True) + def get_top_hackernews_stories(num_stories: int) -> str: + """Fetch top stories from Hacker News. + + Args: + num_stories (int): Number of stories to retrieve + + Returns: + str: JSON string containing story details + """ + # Fetch top story IDs + response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") + story_ids = response.json() + + # Yield story details + all_stories = [] + for story_id in story_ids[:num_stories]: + story_response = httpx.get( + f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" + ) + story = story_response.json() + if "text" in story: + story.pop("text", None) + all_stories.append(story) + return json.dumps(all_stories) + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[get_top_hackernews_stories], + add_history_to_context=True, + num_history_runs=2, + markdown=True, + ) + + agent.run("What can you do?") + + run_response = agent.run("Fetch the top 2 hackernews stories.") + if run_response.is_paused: + for tool in run_response.tools_requiring_confirmation: # type: ignore + # Ask for confirmation + console.print( + f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." + ) + message = ( + Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") + .strip() + .lower() + ) + + if message == "n": + tool.confirmed = False + else: + # We update the tools in place + tool.confirmed = True + + run_response = agent.continue_run(run_response=run_response) + pprint.pprint_run_response(run_response) + ``` + + + + + + ```bash + pip install -U agno openai httpx rich + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python confirmation_required_with_history.py + ``` + + ```bash Windows + python confirmation_required_with_history.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. 
Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/confirmation-required-with-run-id.mdx b/examples/basics/agent/hitl/confirmation-required-with-run-id.mdx new file mode 100644 index 00000000..d50722fc --- /dev/null +++ b/examples/basics/agent/hitl/confirmation-required-with-run-id.mdx @@ -0,0 +1,141 @@ +--- +title: Confirmation Required with Run ID +sidebarTitle: Confirmation with Run ID +keywords: [confirmation, run_id, session management, updated_tools] +description: This example demonstrates human-in-the-loop functionality using specific run IDs for session management. It shows how to continue agent execution with updated tools using run identifiers. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch confirmation_required_with_run_id.py + ``` + + + + + ```python confirmation_required_with_run_id.py + + import json + + import httpx + from agno.agent import Agent + from agno.db.sqlite import SqliteDb + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.utils import pprint + from rich.console import Console + from rich.prompt import Prompt + + console = Console() + + + @tool(requires_confirmation=True) + def get_top_hackernews_stories(num_stories: int) -> str: + """Fetch top stories from Hacker News. + + Args: + num_stories (int): Number of stories to retrieve + + Returns: + str: JSON string containing story details + """ + # Fetch top story IDs + response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") + story_ids = response.json() + + # Yield story details + all_stories = [] + for story_id in story_ids[:num_stories]: + story_response = httpx.get( + f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" + ) + story = story_response.json() + if "text" in story: + story.pop("text", None) + all_stories.append(story) + return json.dumps(all_stories) + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[get_top_hackernews_stories], + markdown=True, + db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), + ) + + run_response = agent.run("Fetch the top 2 hackernews stories.") + if run_response.is_paused: + for tool in run_response.tools_requiring_confirmation: # type: ignore + # Ask for confirmation + console.print( + f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." + ) + message = ( + Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") + .strip() + .lower() + ) + + if message == "n": + tool.confirmed = False + else: + # We update the tools in place + tool.confirmed = True + + updated_tools = run_response.tools + + run_response = agent.continue_run( + run_id=run_response.run_id, + updated_tools=updated_tools, + ) + + pprint.pprint_run_response(run_response) + ``` + + + + + + ```bash + pip install -U agno openai httpx rich + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python confirmation_required_with_run_id.py + ``` + + ```bash Windows + python confirmation_required_with_run_id.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. 
Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/confirmation-required.mdx b/examples/basics/agent/hitl/confirmation-required.mdx new file mode 100644 index 00000000..c59fd8e8 --- /dev/null +++ b/examples/basics/agent/hitl/confirmation-required.mdx @@ -0,0 +1,139 @@ +--- +title: Tool Confirmation Required +sidebarTitle: Confirmation Required +keywords: [user confirmation, requires_confirmation, tool approval] +description: This example demonstrates how to implement human-in-the-loop functionality by requiring user confirmation before executing sensitive tool operations. +mode: wide +--- + + + + + Create a Python file for the example. + ```bash + touch confirmation_required.py + ``` + + + + ```python confirmation_required.py + import json + import httpx + from agno.agent import Agent + from agno.db.sqlite import SqliteDb + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.utils import pprint + from rich.console import Console + from rich.prompt import Prompt + + console = Console() + + + @tool(requires_confirmation=True) + def get_top_hackernews_stories(num_stories: int) -> str: + """Fetch top stories from Hacker News. + + Args: + num_stories (int): Number of stories to retrieve + + Returns: + str: JSON string containing story details + """ + # Fetch top story IDs + response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") + story_ids = response.json() + + # Yield story details + all_stories = [] + for story_id in story_ids[:num_stories]: + story_response = httpx.get( + f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" + ) + story = story_response.json() + if "text" in story: + story.pop("text", None) + all_stories.append(story) + return json.dumps(all_stories) + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[get_top_hackernews_stories], + markdown=True, + db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), + ) + + run_response = agent.run("Fetch the top 2 hackernews stories.") + if run_response.is_paused: + for tool in run_response.tools_requiring_confirmation: + # Ask for confirmation + console.print( + f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." + ) + message = ( + Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") + .strip() + .lower() + ) + + if message == "n": + tool.confirmed = False + else: + # We update the tools in place + tool.confirmed = True + + run_response = agent.continue_run(run_response=run_response) + # Or + # run_response = agent.continue_run(run_id=run_response.run_id, updated_tools=run_response.tools) + + pprint.pprint_run_response(run_response) + + + # Or for simple debug flow + # agent.print_response("Fetch the top 2 hackernews stories") + ``` + + + + + + ```bash + pip install -U agno openai httpx rich + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python confirmation_required.py + ``` + + ```bash Windows + python confirmation_required.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. 
Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/external-tool-execution-async.mdx b/examples/basics/agent/hitl/external-tool-execution-async.mdx new file mode 100644 index 00000000..513a3357 --- /dev/null +++ b/examples/basics/agent/hitl/external-tool-execution-async.mdx @@ -0,0 +1,112 @@ +--- +title: External Tool Execution Async +sidebarTitle: External Execution (Async) +keywords: [external execution, async, external_tool_execution, shell command] +description: This example demonstrates how to execute tools outside of the agent using external tool execution in an asynchronous environment. This pattern allows you to control tool execution externally while maintaining agent functionality with async operations. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch external_tool_execution_async.py + ``` + + + + + ```python external_tool_execution_async.py + import asyncio + import subprocess + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.utils import pprint + + + # We have to create a tool with the correct name, arguments and docstring for the agent to know what to call. + @tool(external_execution=True) + def execute_shell_command(command: str) -> str: + """Execute a shell command. + + Args: + command (str): The shell command to execute + + Returns: + str: The output of the shell command + """ + if command.startswith("ls"): + return subprocess.check_output(command, shell=True).decode("utf-8") + else: + raise Exception(f"Unsupported command: {command}") + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[execute_shell_command], + markdown=True, + ) + + run_response = asyncio.run(agent.arun("What files do I have in my current directory?")) + if run_response.is_paused: + for tool in run_response.tools_awaiting_external_execution: + if tool.tool_name == execute_shell_command.name: + print(f"Executing {tool.tool_name} with args {tool.tool_args} externally") + # We execute the tool ourselves. You can also execute something completely external here. + result = execute_shell_command.entrypoint(**tool.tool_args) # type: ignore + # We have to set the result on the tool execution object so that the agent can continue + tool.result = result + + run_response = asyncio.run(agent.acontinue_run(run_response=run_response)) + pprint.pprint_run_response(run_response) + + + # Or for simple debug flow + # agent.print_response("What files do I have in my current directory?") + ``` + + + + + + ```bash + pip install -U agno openai + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python external_tool_execution_async.py + ``` + + ```bash Windows + python external_tool_execution_async.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. 
Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/external-tool-execution-stream-async.mdx b/examples/basics/agent/hitl/external-tool-execution-stream-async.mdx new file mode 100644 index 00000000..2b7a77f1 --- /dev/null +++ b/examples/basics/agent/hitl/external-tool-execution-stream-async.mdx @@ -0,0 +1,128 @@ +--- +title: External Tool Execution Stream Async +sidebarTitle: External Execution (Stream) +keywords: [external execution, async streaming, arun stream, external tools async] +description: This example demonstrates how to execute tools outside of the agent using external tool execution with async streaming responses. It shows how to handle external tool execution in an asynchronous environment while maintaining real-time streaming. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch external_tool_execution_stream_async.py + ``` + + + + + ```python external_tool_execution_stream_async.py + import asyncio + import subprocess + from agno.agent import Agent + from agno.db.sqlite import SqliteDb + from agno.models.openai import OpenAIChat + from agno.tools import tool + + + # We have to create a tool with the correct name, arguments and docstring for the agent to know what to call. + @tool(external_execution=True) + def execute_shell_command(command: str) -> str: + """Execute a shell command. + + Args: + command (str): The shell command to execute + + Returns: + str: The output of the shell command + """ + if command.startswith("ls"): + return subprocess.check_output(command, shell=True).decode("utf-8") + else: + raise Exception(f"Unsupported command: {command}") + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[execute_shell_command], + markdown=True, + db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), + ) + + + async def main(): + async for run_event in agent.arun( + "What files do I have in my current directory?", stream=True + ): + if run_event.is_paused: + for tool in run_event.tools_awaiting_external_execution: # type: ignore + if tool.tool_name == execute_shell_command.name: + print( + f"Executing {tool.tool_name} with args {tool.tool_args} externally" + ) + # We execute the tool ourselves. You can also execute something completely external here. + result = execute_shell_command.entrypoint(**tool.tool_args) # type: ignore + # We have to set the result on the tool execution object so that the agent can continue + tool.result = result + + async for resp in agent.acontinue_run( # type: ignore + run_id=run_event.run_id, + updated_tools=run_event.tools, + stream=True, + ): + print(resp.content, end="") + else: + print(run_event.content, end="") + + # Or for simple debug flow + # agent.print_response("What files do I have in my current directory?", stream=True) + + + if __name__ == "__main__": + asyncio.run(main()) + ``` + + + + + + ```bash + pip install -U agno openai + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python external_tool_execution_stream_async.py + ``` + + ```bash Windows + python external_tool_execution_stream_async.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. 
Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/external-tool-execution-toolkit.mdx b/examples/basics/agent/hitl/external-tool-execution-toolkit.mdx new file mode 100644 index 00000000..0ec3bd87 --- /dev/null +++ b/examples/basics/agent/hitl/external-tool-execution-toolkit.mdx @@ -0,0 +1,115 @@ +--- +title: External Tool Execution Toolkit +sidebarTitle: External Execution (Toolkit) +keywords: [external execution, toolkit, custom toolkit, external_execution_required_tools] +description: This example demonstrates how to execute toolkit-based tools outside of the agent using external tool execution. It shows how to create a custom toolkit with tools that require external execution. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch external_tool_execution_toolkit.py + ``` + + + + + ```python external_tool_execution_toolkit.py + import subprocess + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.tools.toolkit import Toolkit + from agno.utils import pprint + + + class ShellTools(Toolkit): + def __init__(self, *args, **kwargs): + super().__init__( + tools=[self.list_dir], + external_execution_required_tools=["list_dir"], + *args, + **kwargs, + ) + + def list_dir(self, directory: str): + """ + Lists the contents of a directory. + + Args: + directory: The directory to list. + + Returns: + A string containing the contents of the directory. + """ + return subprocess.check_output(f"ls {directory}", shell=True).decode("utf-8") + + + tools = ShellTools() + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[tools], + markdown=True, + ) + + run_response = agent.run("What files do I have in my current directory?") + if run_response.is_paused: + for tool in run_response.tools_awaiting_external_execution: + if tool.tool_name == "list_dir": + print(f"Executing {tool.tool_name} with args {tool.tool_args} externally") + # We execute the tool ourselves. You can also execute something completely external here. + result = tools.list_dir(**tool.tool_args) # type: ignore + # We have to set the result on the tool execution object so that the agent can continue + tool.result = result + + run_response = agent.continue_run(run_response=run_response) + pprint.pprint_run_response(run_response) + ``` + + + + + + ```bash + pip install -U agno openai + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python external_tool_execution_toolkit.py + ``` + + ```bash Windows + python external_tool_execution_toolkit.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/external-tool-execution.mdx b/examples/basics/agent/hitl/external-tool-execution.mdx new file mode 100644 index 00000000..30860691 --- /dev/null +++ b/examples/basics/agent/hitl/external-tool-execution.mdx @@ -0,0 +1,110 @@ +--- +title: External Tool Execution +sidebarTitle: External Execution of Tool +keywords: [external execution, external_execution, tool control, subprocess, external service] +description: This example demonstrates how to execute tools outside of the agent using external tool execution. 
This pattern allows you to control tool execution externally while maintaining agent functionality. +mode: wide +--- + + + + + Create a Python file for the example. + ```bash + touch external_tool_execution.py + ``` + + + + ```python external_tool_execution.py + import subprocess + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.utils import pprint + + + # We have to create a tool with the correct name, arguments and docstring for the agent to know what to call. + @tool(external_execution=True) + def execute_shell_command(command: str) -> str: + """Execute a shell command. + + Args: + command (str): The shell command to execute + + Returns: + str: The output of the shell command + """ + if command.startswith("ls"): + return subprocess.check_output(command, shell=True).decode("utf-8") + else: + raise Exception(f"Unsupported command: {command}") + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[execute_shell_command], + markdown=True, + ) + + run_response = agent.run("What files do I have in my current directory?") + if run_response.is_paused: + for tool in run_response.tools_awaiting_external_execution: + if tool.tool_name == execute_shell_command.name: + print(f"Executing {tool.tool_name} with args {tool.tool_args} externally") + # We execute the tool ourselves. You can also execute something completely external here. + result = execute_shell_command.entrypoint(**tool.tool_args) # type: ignore + # We have to set the result on the tool execution object so that the agent can continue + tool.result = result + + run_response = agent.continue_run(run_response=run_response) + pprint.pprint_run_response(run_response) + + + # Or for simple debug flow + # agent.print_response("What files do I have in my current directory?") + ``` + + + + + + ```bash + pip install -U agno openai + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python external_tool_execution.py + ``` + + ```bash Windows + python external_tool_execution.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/user-input-required-all-fields.mdx b/examples/basics/agent/hitl/user-input-required-all-fields.mdx new file mode 100644 index 00000000..8bfc56e1 --- /dev/null +++ b/examples/basics/agent/hitl/user-input-required-all-fields.mdx @@ -0,0 +1,119 @@ +--- +title: User Input Required All Fields +sidebarTitle: User Input All Fields +keywords: [user input, requires_user_input, all fields, UserInputField] +description: This example demonstrates how to use the requires_user_input parameter to collect input for all fields in a tool. It shows how to handle user input schema and collect values for each required field. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch user_input_required_all_fields.py + ``` + + + + + ```python user_input_required_all_fields.py + from typing import List + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.tools.function import UserInputField + from agno.utils import pprint + + + @tool(requires_user_input=True) + def send_email(subject: str, body: str, to_address: str) -> str: + """ + Send an email. 
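+    Since requires_user_input=True is set without user_input_fields, every argument is collected from the user at run time.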
+ + Args: + subject (str): The subject of the email. + body (str): The body of the email. + to_address (str): The address to send the email to. + """ + return f"Sent email to {to_address} with subject {subject} and body {body}" + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[send_email], + markdown=True, + ) + + run_response = agent.run("Send an email please") + if run_response.is_paused: # Or agent.run_response.is_paused + for tool in run_response.tools_requiring_user_input: # type: ignore + input_schema: List[UserInputField] = tool.user_input_schema # type: ignore + + for field in input_schema: + # Get user input for each field in the schema + field_type = field.field_type + field_description = field.description + + # Display field information to the user + print(f"\nField: {field.name}") + print(f"Description: {field_description}") + print(f"Type: {field_type}") + + # Get user input + if field.value is None: + user_value = input(f"Please enter a value for {field.name}: ") + + # Update the field value + field.value = user_value + + run_response = agent.continue_run(run_response=run_response) + pprint.pprint_run_response(run_response) + + # Or for simple debug flow + # agent.print_response("Send an email please") + ``` + + + + + + ```bash + pip install -U agno openai + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python user_input_required_all_fields.py + ``` + + ```bash Windows + python user_input_required_all_fields.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/user-input-required-async.mdx b/examples/basics/agent/hitl/user-input-required-async.mdx new file mode 100644 index 00000000..cb1bc287 --- /dev/null +++ b/examples/basics/agent/hitl/user-input-required-async.mdx @@ -0,0 +1,126 @@ +--- +title: User Input Required Async +sidebarTitle: User Input Async +keywords: [user input, async, requires_user_input, user_input_fields] +description: This example demonstrates how to use the requires_user_input parameter with asynchronous operations. It shows how to collect specific user input fields in an async environment. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch user_input_required_async.py + ``` + + + + + ```python user_input_required_async.py + import asyncio + from typing import List + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.tools.function import UserInputField + from agno.utils import pprint + + + # You can either specify the user_input_fields leave empty for all fields to be provided by the user + @tool(requires_user_input=True, user_input_fields=["to_address"]) + def send_email(subject: str, body: str, to_address: str) -> str: + """ + Send an email. + + Args: + subject (str): The subject of the email. + body (str): The body of the email. + to_address (str): The address to send the email to. 
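+
+    Returns:
+        str: A confirmation message for the sent email.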
+ """ + return f"Sent email to {to_address} with subject {subject} and body {body}" + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[send_email], + markdown=True, + ) + + run_response = asyncio.run( + agent.arun("Send an email with the subject 'Hello' and the body 'Hello, world!'") + ) + if run_response.is_paused: # Or agent.run_response.is_paused + for tool in run_response.tools_requiring_user_input: # type: ignore + input_schema: List[UserInputField] = tool.user_input_schema # type: ignore + + for field in input_schema: + # Get user input for each field in the schema + field_type = field.field_type + field_description = field.description + + # Display field information to the user + print(f"\nField: {field.name}") + print(f"Description: {field_description}") + print(f"Type: {field_type}") + + # Get user input + if field.value is None: + user_value = input(f"Please enter a value for {field.name}: ") + else: + print(f"Value: {field.value}") + user_value = field.value + + # Update the field value + field.value = user_value + + run_response = asyncio.run(agent.acontinue_run(run_response=run_response)) + pprint.pprint_run_response(run_response) + + # Or for simple debug flow + # agent.print_response("Send an email with the subject 'Hello' and the body 'Hello, world!'") + ``` + + + + + + ```bash + pip install -U agno openai + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python user_input_required_async.py + ``` + + ```bash Windows + python user_input_required_async.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/user-input-required-stream-async.mdx b/examples/basics/agent/hitl/user-input-required-stream-async.mdx new file mode 100644 index 00000000..9e87ef77 --- /dev/null +++ b/examples/basics/agent/hitl/user-input-required-stream-async.mdx @@ -0,0 +1,138 @@ +--- +title: User Input Required Stream Async +sidebarTitle: User Input Stream +keywords: [user input, async streaming, requires_user_input, arun stream] +description: This example demonstrates how to use the requires_user_input parameter with async streaming responses. It shows how to collect specific user input fields in an asynchronous environment while maintaining real-time streaming. +mode: wide +--- + + + + + Create a Python file and add the above code. + ```bash + touch user_input_required_stream_async.py + ``` + + + + + ```python user_input_required_stream_async.py + import asyncio + from typing import List + from agno.agent import Agent + from agno.db.sqlite import SqliteDb + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.tools.function import UserInputField + + + # You can either specify the user_input_fields leave empty for all fields to be provided by the user + @tool(requires_user_input=True, user_input_fields=["to_address"]) + def send_email(subject: str, body: str, to_address: str) -> str: + """ + Send an email. + + Args: + subject (str): The subject of the email. + body (str): The body of the email. + to_address (str): The address to send the email to. 
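+
+    Returns:
+        str: A confirmation message for the sent email.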
+ """ + return f"Sent email to {to_address} with subject {subject} and body {body}" + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[send_email], + markdown=True, + db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), + ) + + + async def main(): + async for run_event in agent.arun( + "Send an email with the subject 'Hello' and the body 'Hello, world!'", + stream=True, + ): + if run_event.is_paused: # Or agent.run_response.is_paused + for tool in run_event.tools_requiring_user_input: # type: ignore + input_schema: List[UserInputField] = tool.user_input_schema # type: ignore + + for field in input_schema: + # Get user input for each field in the schema + field_type = field.field_type + field_description = field.description + + # Display field information to the user + print(f"\nField: {field.name}") + print(f"Description: {field_description}") + print(f"Type: {field_type}") + + # Get user input + if field.value is None: + user_value = input(f"Please enter a value for {field.name}: ") + else: + print(f"Value: {field.value}") + user_value = field.value + + # Update the field value + field.value = user_value + + async for resp in agent.acontinue_run( # type: ignore + run_id=run_event.run_id, + updated_tools=run_event.tools, + stream=True, + ): + print(resp.content, end="") + + # Or for simple debug flow + # agent.aprint_response("Send an email with the subject 'Hello' and the body 'Hello, world!'") + + + if __name__ == "__main__": + asyncio.run(main()) + ``` + + + + + + ```bash + pip install -U agno openai + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python user_input_required_stream_async.py + ``` + + ```bash Windows + python user_input_required_stream_async.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/hitl/user-input-required.mdx b/examples/basics/agent/hitl/user-input-required.mdx new file mode 100644 index 00000000..58b0e0ad --- /dev/null +++ b/examples/basics/agent/hitl/user-input-required.mdx @@ -0,0 +1,126 @@ +--- +title: User Input Required for Tool Execution +sidebarTitle: User Input Required +keywords: [user input, requires_user_input, user_input_fields, UserInputField, form input] +description: This example demonstrates how to create tools that require user input before execution, allowing for dynamic data collection during agent runs. +mode: wide +--- + + + + + Create a Python file for the example. + ```bash + touch user_input_required.py + ``` + + + + ```python user_input_required.py + from typing import List + from agno.agent import Agent + from agno.models.openai import OpenAIChat + from agno.tools import tool + from agno.tools.function import UserInputField + from agno.utils import pprint + + + # You can either specify the user_input_fields leave empty for all fields to be provided by the user + @tool(requires_user_input=True, user_input_fields=["to_address"]) + def send_email(subject: str, body: str, to_address: str) -> str: + """ + Send an email. + + Args: + subject (str): The subject of the email. + body (str): The body of the email. + to_address (str): The address to send the email to. 
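+
+    Returns:
+        str: A confirmation message for the sent email.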
+ """ + return f"Sent email to {to_address} with subject {subject} and body {body}" + + + agent = Agent( + model=OpenAIChat(id="gpt-5-mini"), + tools=[send_email], + markdown=True, + ) + + run_response = agent.run( + "Send an email with the subject 'Hello' and the body 'Hello, world!'" + ) + if run_response.is_paused: + for tool in run_response.tools_requiring_user_input: # type: ignore + input_schema: List[UserInputField] = tool.user_input_schema # type: ignore + + for field in input_schema: + # Get user input for each field in the schema + field_type = field.field_type + field_description = field.description + + # Display field information to the user + print(f"\nField: {field.name}") + print(f"Description: {field_description}") + print(f"Type: {field_type}") + + # Get user input + if field.value is None: + user_value = input(f"Please enter a value for {field.name}: ") + else: + print(f"Value: {field.value}") + user_value = field.value + + # Update the field value + field.value = user_value + + run_response = agent.continue_run( + run_response=run_response + ) # or agent.continue_run(run_response=run_response) + pprint.pprint_run_response(run_response) + + # Or for simple debug flow + # agent.print_response("Send an email with the subject 'Hello' and the body 'Hello, world!'") + ``` + + + + + + ```bash + pip install -U agno openai + ``` + + + + + + + ```bash Mac/Linux + export OPENAI_API_KEY="your_openai_api_key_here" + ``` + + ```bash Windows + $Env:OPENAI_API_KEY="your_openai_api_key_here" + ``` + + + + + + ```bash Mac + python user_input_required.py + ``` + + ```bash Windows + python user_input_required.py + ``` + + + + + Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: + + + Agno Cookbooks on GitHub + + + diff --git a/examples/basics/agent/human_in_the_loop/agentic_user_input.mdx b/examples/basics/agent/human_in_the_loop/agentic_user_input.mdx deleted file mode 100644 index 9de62037..00000000 --- a/examples/basics/agent/human_in_the_loop/agentic_user_input.mdx +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: Agentic User Input with Control Flow ---- - -This example demonstrates how to use UserControlFlowTools to allow agents to dynamically request user input when they need additional information to complete tasks. - -## Code - -```python agentic_user_input.py -"""🤝 Human-in-the-Loop: Allowing users to provide input externally - -This example shows how to use the UserControlFlowTools to allow the agent to get user input dynamically. -If the agent doesn't have enough information to complete a task, it will use the toolkit to get the information it needs from the user. -""" - -from typing import Any, Dict, List - -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import Toolkit -from agno.tools.function import UserInputField -from agno.tools.user_control_flow import UserControlFlowTools -from agno.utils import pprint - - -class EmailTools(Toolkit): - def __init__(self, *args, **kwargs): - super().__init__( - name="EmailTools", tools=[self.send_email, self.get_emails], *args, **kwargs - ) - - def send_email(self, subject: str, body: str, to_address: str) -> str: - """Send an email to the given address with the given subject and body. - - Args: - subject (str): The subject of the email. - body (str): The body of the email. - to_address (str): The address to send the email to. 
- """ - return f"Sent email to {to_address} with subject {subject} and body {body}" - - def get_emails(self, date_from: str, date_to: str) -> list[dict[str, str]]: - """Get all emails between the given dates. - - Args: - date_from (str): The start date (in YYYY-MM-DD format). - date_to (str): The end date (in YYYY-MM-DD format). - """ - return [ - { - "subject": "Hello", - "body": "Hello, world!", - "to_address": "test@test.com", - "date": date_from, - }, - { - "subject": "Random other email", - "body": "This is a random other email", - "to_address": "john@doe.com", - "date": date_to, - }, - ] - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[EmailTools(), UserControlFlowTools()], - markdown=True, -) - -run_response = agent.run("Send an email with the body 'What is the weather in Tokyo?'") - -# We use a while loop to continue the running until the agent is satisfied with the user input -while run_response.is_paused: - for tool in run_response.tools_requiring_user_input: - input_schema: List[UserInputField] = tool.user_input_schema # type: ignore - - for field in input_schema: - # Get user input for each field in the schema - field_type = field.field_type # type: ignore - field_description = field.description # type: ignore - - # Display field information to the user - print(f"\nField: {field.name}") # type: ignore - print(f"Description: {field_description}") - print(f"Type: {field_type}") - - # Get user input - if field.value is None: # type: ignore - user_value = input(f"Please enter a value for {field.name}: ") # type: ignore - else: - print(f"Value: {field.value}") # type: ignore - user_value = field.value # type: ignore - - # Update the field value - field.value = user_value # type: ignore - - run_response = agent.continue_run(run_response=run_response) - if not run_response.is_paused: - pprint.pprint_run_response(run_response) - break - - -run_response = agent.run("Get me all my emails") - -while run_response.is_paused: - for tool in run_response.tools_requiring_user_input: - input_schema: Dict[str, Any] = tool.user_input_schema # type: ignore - - for field in input_schema: - # Get user input for each field in the schema - field_type = field.field_type # type: ignore - field_description = field.description # type: ignore - - # Display field information to the user - print(f"\nField: {field.name}") # type: ignore - print(f"Description: {field_description}") - print(f"Type: {field_type}") - - # Get user input - if field.value is None: # type: ignore - user_value = input(f"Please enter a value for {field.name}: ") # type: ignore - else: - print(f"Value: {field.value}") # type: ignore - user_value = field.value # type: ignore - - # Update the field value - field.value = user_value # type: ignore - - run_response = agent.continue_run(run_response=run_response) - if not run_response.is_paused: - pprint.pprint_run_response(run_response) - break -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch agentic_user_input.py - ``` - - - - - ```bash Mac - python agentic_user_input.py - ``` - - ```bash Windows - python agentic_user_input.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. 
Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/confirmation_required.mdx b/examples/basics/agent/human_in_the_loop/confirmation_required.mdx deleted file mode 100644 index 8f604644..00000000 --- a/examples/basics/agent/human_in_the_loop/confirmation_required.mdx +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: Tool Confirmation Required ---- - -This example demonstrates how to implement human-in-the-loop functionality by requiring user confirmation before executing sensitive tool operations, such as API calls or data modifications. - -## Code - -```python confirmation_required.py -"""🤝 Human-in-the-Loop: Adding User Confirmation to Tool Calls - -This example shows how to implement human-in-the-loop functionality in your Agno tools. -It shows how to: -- Handle user confirmation during tool execution -- Gracefully cancel operations based on user choice - -Some practical applications: -- Confirming sensitive operations before execution -- Reviewing API calls before they're made -- Validating data transformations -- Approving automated actions in critical systems - -Run `pip install openai httpx rich agno` to install dependencies. -""" - -import json - -import httpx -from agno.agent import Agent -from agno.db.sqlite import SqliteDb -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.utils import pprint -from rich.console import Console -from rich.prompt import Prompt - -console = Console() - - -@tool(requires_confirmation=True) -def get_top_hackernews_stories(num_stories: int) -> str: - """Fetch top stories from Hacker News. - - Args: - num_stories (int): Number of stories to retrieve - - Returns: - str: JSON string containing story details - """ - # Fetch top story IDs - response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") - story_ids = response.json() - - # Yield story details - all_stories = [] - for story_id in story_ids[:num_stories]: - story_response = httpx.get( - f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" - ) - story = story_response.json() - if "text" in story: - story.pop("text", None) - all_stories.append(story) - return json.dumps(all_stories) - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[get_top_hackernews_stories], - markdown=True, - db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), -) - -run_response = agent.run("Fetch the top 2 hackernews stories.") -if run_response.is_paused: - for tool in run_response.tools_requiring_confirmation: - # Ask for confirmation - console.print( - f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." 
- ) - message = ( - Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") - .strip() - .lower() - ) - - if message == "n": - tool.confirmed = False - else: - # We update the tools in place - tool.confirmed = True - -run_response = agent.continue_run(run_response=run_response) -# Or -# run_response = agent.continue_run(run_id=run_response.run_id, updated_tools=run_response.tools) - -pprint.pprint_run_response(run_response) - - -# Or for simple debug flow -# agent.print_response("Fetch the top 2 hackernews stories") -``` - -## Usage - - - - - - ```bash - pip install -U agno openai httpx rich - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch confirmation_required.py - ``` - - - - - ```bash Mac - python confirmation_required.py - ``` - - ```bash Windows - python confirmation_required.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/confirmation_required_async.mdx b/examples/basics/agent/human_in_the_loop/confirmation_required_async.mdx deleted file mode 100644 index 1fc8d25f..00000000 --- a/examples/basics/agent/human_in_the_loop/confirmation_required_async.mdx +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: Async Tool Confirmation Required ---- - -This example demonstrates how to implement human-in-the-loop functionality with async agents, requiring user confirmation before executing tool operations. - -## Code - -```python confirmation_required_async.py -"""🤝 Human-in-the-Loop: Adding User Confirmation to Tool Calls - -This example shows how to implement human-in-the-loop functionality in your Agno tools. -It shows how to: -- Handle user confirmation during tool execution -- Gracefully cancel operations based on user choice - -Some practical applications: -- Confirming sensitive operations before execution -- Reviewing API calls before they're made -- Validating data transformations -- Approving automated actions in critical systems - -Run `pip install openai httpx rich agno` to install dependencies. -""" - -import asyncio -import json - -import httpx -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.utils import pprint -from rich.console import Console -from rich.prompt import Prompt - -console = Console() - - -@tool(requires_confirmation=True) -def get_top_hackernews_stories(num_stories: int) -> str: - """Fetch top stories from Hacker News. 
- - Args: - num_stories (int): Number of stories to retrieve - - Returns: - str: JSON string containing story details - """ - # Fetch top story IDs - response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") - story_ids = response.json() - - # Yield story details - all_stories = [] - for story_id in story_ids[:num_stories]: - story_response = httpx.get( - f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" - ) - story = story_response.json() - if "text" in story: - story.pop("text", None) - all_stories.append(story) - return json.dumps(all_stories) - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[get_top_hackernews_stories], - markdown=True, -) - -run_response = asyncio.run(agent.arun("Fetch the top 2 hackernews stories")) -if run_response.is_paused: - for tool in run_response.tools_requiring_confirmation: - # Ask for confirmation - console.print( - f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." - ) - message = ( - Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") - .strip() - .lower() - ) - - if message == "n": - tool.confirmed = False - else: - # We update the tools in place - tool.confirmed = True - - -run_response = asyncio.run(agent.acontinue_run(run_response=run_response)) -# Or -# run_response = asyncio.run(agent.acontinue_run(run_id=run_response.run_id)) - -pprint.pprint_run_response(run_response) - - -# Or for simple debug flow -# asyncio.run(agent.aprint_response("Fetch the top 2 hackernews stories")) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai httpx rich - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch confirmation_required_async.py - ``` - - - - - ```bash Mac - python confirmation_required_async.py - ``` - - ```bash Windows - python confirmation_required_async.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/confirmation_required_mixed_tools.mdx b/examples/basics/agent/human_in_the_loop/confirmation_required_mixed_tools.mdx deleted file mode 100644 index faa378d6..00000000 --- a/examples/basics/agent/human_in_the_loop/confirmation_required_mixed_tools.mdx +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: Confirmation Required with Mixed Tools ---- - -This example demonstrates human-in-the-loop functionality where only some tools require user confirmation. The agent executes tools that don't require confirmation automatically and pauses only for tools that need approval. - -## Code - -```python confirmation_required_mixed_tools.py -"""🤝 Human-in-the-Loop: Adding User Confirmation to Tool Calls - -This example shows how to implement human-in-the-loop functionality in your Agno tools. - -In this case we have multiple tools and only one of them requires confirmation. - -The agent should execute the tool that doesn't require confirmation and then pause for user confirmation. - -The user can then either approve or reject the tool call and the agent should continue from where it left off. 
-""" - -import json - -import httpx -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.utils import pprint -from rich.console import Console -from rich.prompt import Prompt - -console = Console() - - -def get_top_hackernews_stories(num_stories: int) -> str: - """Fetch top stories from Hacker News. - - Args: - num_stories (int): Number of stories to retrieve - - Returns: - str: JSON string containing story details - """ - # Fetch top story IDs - response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") - story_ids = response.json() - - # Yield story details - all_stories = [] - for story_id in story_ids[:num_stories]: - story_response = httpx.get( - f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" - ) - story = story_response.json() - if "text" in story: - story.pop("text", None) - all_stories.append(story) - return json.dumps(all_stories) - - -@tool(requires_confirmation=True) -def send_email(to: str, subject: str, body: str) -> str: - """Send an email. - - Args: - to (str): Email address to send to - subject (str): Subject of the email - body (str): Body of the email - """ - return f"Email sent to {to} with subject {subject} and body {body}" - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[get_top_hackernews_stories, send_email], - markdown=True, -) - -run_response = agent.run( - "Fetch the top 2 hackernews stories and email them to john@doe.com." -) -if run_response.is_paused: - for tool in run_response.tools: # type: ignore - if tool.requires_confirmation: - # Ask for confirmation - console.print( - f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." - ) - message = ( - Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") - .strip() - .lower() - ) - - if message == "n": - tool.confirmed = False - else: - # We update the tools in place - tool.confirmed = True - else: - console.print( - f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] was completed in [bold green]{tool.metrics.duration:.2f}[/] seconds." # type: ignore - ) - - run_response = agent.continue_run(run_response=run_response) - pprint.pprint_run_response(run_response) - -# Or for simple debug flow -# agent.print_response("Fetch the top 2 hackernews stories") -``` - -## Usage - - - - - - ```bash - pip install -U agno openai httpx rich - ``` - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch confirmation_required_mixed_tools.py - ``` - - - - - ```bash Mac - python confirmation_required_mixed_tools.py - ``` - - ```bash Windows - python confirmation_required_mixed_tools.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/confirmation_required_multiple_tools.mdx b/examples/basics/agent/human_in_the_loop/confirmation_required_multiple_tools.mdx deleted file mode 100644 index e56d724d..00000000 --- a/examples/basics/agent/human_in_the_loop/confirmation_required_multiple_tools.mdx +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: Confirmation Required with Multiple Tools ---- - -This example demonstrates human-in-the-loop functionality with multiple tools that require confirmation. 
It shows how to handle user confirmation during tool execution and gracefully cancel operations based on user choice. - -## Code - -```python confirmation_required_multiple_tools.py -"""🤝 Human-in-the-Loop: Adding User Confirmation to Tool Calls - -This example shows how to implement human-in-the-loop functionality in your Agno tools. -It shows how to: -- Handle user confirmation during tool execution -- Gracefully cancel operations based on user choice - -Some practical applications: -- Confirming sensitive operations before execution -- Reviewing API calls before they're made -- Validating data transformations -- Approving automated actions in critical systems - -Run `pip install openai httpx rich agno` to install dependencies. -""" - -import json - -import httpx -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.tools.wikipedia import WikipediaTools -from agno.utils import pprint -from rich.console import Console -from rich.prompt import Prompt - -console = Console() - - -@tool(requires_confirmation=True) -def get_top_hackernews_stories(num_stories: int) -> str: - """Fetch top stories from Hacker News. - - Args: - num_stories (int): Number of stories to retrieve - - Returns: - str: JSON string containing story details - """ - # Fetch top story IDs - response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") - story_ids = response.json() - - # Yield story details - all_stories = [] - for story_id in story_ids[:num_stories]: - story_response = httpx.get( - f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" - ) - story = story_response.json() - if "text" in story: - story.pop("text", None) - all_stories.append(story) - return json.dumps(all_stories) - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[ - get_top_hackernews_stories, - WikipediaTools(requires_confirmation_tools=["search_wikipedia"]), - ], - markdown=True, -) - -run_response = agent.run( - "Fetch 2 articles about the topic 'python'. You can choose which source to use, but only use one source." -) -while run_response.is_paused: - for tool_exc in run_response.tools_requiring_confirmation: # type: ignore - # Ask for confirmation - console.print( - f"Tool name [bold blue]{tool_exc.tool_name}({tool_exc.tool_args})[/] requires confirmation." - ) - message = ( - Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") - .strip() - .lower() - ) - - if message == "n": - tool.confirmed = False - tool.confirmation_note = ( - "This is not the right tool to use. Use the other tool!" - ) - else: - # We update the tools in place - tool.confirmed = True - - run_response = agent.continue_run(run_response=run_response) - pprint.pprint_run_response(run_response) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai httpx rich - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch confirmation_required_multiple_tools.py - ``` - - - - ```bash Mac - python confirmation_required_multiple_tools.py - ``` - - ```bash Windows - python confirmation_required_multiple_tools.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. 
Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/confirmation_required_stream.mdx b/examples/basics/agent/human_in_the_loop/confirmation_required_stream.mdx deleted file mode 100644 index 2f2f8001..00000000 --- a/examples/basics/agent/human_in_the_loop/confirmation_required_stream.mdx +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: Confirmation Required with Streaming ---- - -This example demonstrates human-in-the-loop functionality with streaming responses. It shows how to handle user confirmation during tool execution while maintaining real-time streaming capabilities. - -## Code - -```python confirmation_required_stream.py -"""🤝 Human-in-the-Loop: Adding User Confirmation to Tool Calls - -This example shows how to implement human-in-the-loop functionality in your Agno tools. -It shows how to: -- Handle user confirmation during tool execution -- Gracefully cancel operations based on user choice - -Some practical applications: -- Confirming sensitive operations before execution -- Reviewing API calls before they're made -- Validating data transformations -- Approving automated actions in critical systems - -Run `pip install openai httpx rich agno` to install dependencies. -""" - -import json - -import httpx -from agno.agent import Agent -from agno.db.sqlite import SqliteDb -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.utils import pprint -from rich.console import Console -from rich.prompt import Prompt - -console = Console() - - -@tool(requires_confirmation=True) -def get_top_hackernews_stories(num_stories: int) -> str: - """Fetch top stories from Hacker News. - - Args: - num_stories (int): Number of stories to retrieve - - Returns: - str: JSON string containing story details - """ - # Fetch top story IDs - response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") - story_ids = response.json() - - # Yield story details - all_stories = [] - for story_id in story_ids[:num_stories]: - story_response = httpx.get( - f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" - ) - story = story_response.json() - if "text" in story: - story.pop("text", None) - all_stories.append(story) - return json.dumps(all_stories) - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - db=SqliteDb( - db_file="tmp/example.db", - ), - tools=[get_top_hackernews_stories], - markdown=True, -) - -for run_event in agent.run("Fetch the top 2 hackernews stories", stream=True): - if run_event.is_paused: - for tool in run_event.tools_requiring_confirmation: # type: ignore - # Ask for confirmation - console.print( - f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." 
- ) - message = ( - Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") - .strip() - .lower() - ) - - if message == "n": - tool.confirmed = False - else: - # We update the tools in place - tool.confirmed = True - run_response = agent.continue_run( - run_id=run_event.run_id, updated_tools=run_event.tools, stream=True - ) # type: ignore - pprint.pprint_run_response(run_response) - -# Or for simple debug flow -# agent.print_response("Fetch the top 2 hackernews stories", stream=True) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai httpx rich - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch confirmation_required_stream.py - ``` - - - - - ```bash Mac - python confirmation_required_stream.py - ``` - - ```bash Windows - python confirmation_required_stream.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/confirmation_required_stream_async.mdx b/examples/basics/agent/human_in_the_loop/confirmation_required_stream_async.mdx deleted file mode 100644 index 17dc1e22..00000000 --- a/examples/basics/agent/human_in_the_loop/confirmation_required_stream_async.mdx +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: Confirmation Required with Async Streaming ---- - -This example demonstrates human-in-the-loop functionality with asynchronous streaming responses. It shows how to handle user confirmation during tool execution in an async environment while maintaining real-time streaming. - -## Code - -```python confirmation_required_stream_async.py -"""🤝 Human-in-the-Loop: Adding User Confirmation to Tool Calls - -This example shows how to implement human-in-the-loop functionality in your Agno tools. -It shows how to: -- Handle user confirmation during tool execution -- Gracefully cancel operations based on user choice - -Some practical applications: -- Confirming sensitive operations before execution -- Reviewing API calls before they're made -- Validating data transformations -- Approving automated actions in critical systems - -Run `pip install openai httpx rich agno` to install dependencies. -""" - -import asyncio -import json - -import httpx -from agno.agent import Agent -from agno.db.sqlite import SqliteDb -from agno.models.openai import OpenAIChat -from agno.tools import tool -from rich.console import Console -from rich.prompt import Prompt - -console = Console() - - -@tool(requires_confirmation=True) -def get_top_hackernews_stories(num_stories: int) -> str: - """Fetch top stories from Hacker News. 
- - Args: - num_stories (int): Number of stories to retrieve - - Returns: - str: JSON string containing story details - """ - # Fetch top story IDs - response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") - story_ids = response.json() - - # Yield story details - all_stories = [] - for story_id in story_ids[:num_stories]: - story_response = httpx.get( - f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" - ) - story = story_response.json() - if "text" in story: - story.pop("text", None) - all_stories.append(story) - return json.dumps(all_stories) - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[get_top_hackernews_stories], - db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), - markdown=True, -) - - -async def main(): - async for run_event in agent.arun( - "Fetch the top 2 hackernews stories", stream=True - ): - if run_event.is_paused: - for tool in run_event.tools_requiring_confirmation: # type: ignore - # Ask for confirmation - console.print( - f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." - ) - message = ( - Prompt.ask( - "Do you want to continue?", choices=["y", "n"], default="y" - ) - .strip() - .lower() - ) - - if message == "n": - tool.confirmed = False - else: - # We update the tools in place - tool.confirmed = True - - async for resp in agent.acontinue_run( # type: ignore - run_id=run_event.run_id, updated_tools=run_event.tools, stream=True - ): - print(resp.content, end="") - - # Or for simple debug flow - # await agent.aprint_response("Fetch the top 2 hackernews stories", stream=True) - - -if __name__ == "__main__": - asyncio.run(main()) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai httpx rich - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch confirmation_required_stream_async.py - ``` - - - - - ```bash Mac - python confirmation_required_stream_async.py - ``` - - ```bash Windows - python confirmation_required_stream_async.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/confirmation_required_toolkit.mdx b/examples/basics/agent/human_in_the_loop/confirmation_required_toolkit.mdx deleted file mode 100644 index 654f2c29..00000000 --- a/examples/basics/agent/human_in_the_loop/confirmation_required_toolkit.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Confirmation Required with Toolkit ---- - -This example demonstrates human-in-the-loop functionality using toolkit-based tools that require confirmation. It shows how to handle user confirmation when working with pre-built tool collections like YFinanceTools. - -## Code - -```python confirmation_required_toolkit.py -"""🤝 Human-in-the-Loop: Adding User Confirmation to Tool Calls - -This example shows how to implement human-in-the-loop functionality in your Agno tools. 
-It shows how to: -- Handle user confirmation during tool execution -- Gracefully cancel operations based on user choice - -Some practical applications: -- Confirming sensitive operations before execution -- Reviewing API calls before they're made -- Validating data transformations -- Approving automated actions in critical systems - -Run `pip install openai httpx rich agno` to install dependencies. -""" - -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.tools.duckduckgo import DuckDuckGoTools -from agno.utils import pprint -from rich.console import Console -from rich.prompt import Prompt - -console = Console() - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[DuckDuckGoTools(requires_confirmation_tools=["get_current_stock_price"])], - markdown=True, -) - -run_response = agent.run("What is the current stock price of Apple?") -if run_response.is_paused: # Or agent.run_response.is_paused - for tool in run_response.tools_requiring_confirmation: # type: ignore - # Ask for confirmation - console.print( - f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." - ) - message = ( - Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") - .strip() - .lower() - ) - - if message == "n": - tool.confirmed = False - else: - # We update the tools in place - tool.confirmed = True - - run_response = agent.continue_run(run_response=run_response) - pprint.pprint_run_response(run_response) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai ddgs rich - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch confirmation_required_toolkit.py - ``` - - - - - ```bash Mac - python confirmation_required_toolkit.py - ``` - - ```bash Windows - python confirmation_required_toolkit.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/confirmation_required_with_history.mdx b/examples/basics/agent/human_in_the_loop/confirmation_required_with_history.mdx deleted file mode 100644 index 85a493e7..00000000 --- a/examples/basics/agent/human_in_the_loop/confirmation_required_with_history.mdx +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Confirmation Required with History ---- - -This example demonstrates human-in-the-loop functionality while maintaining conversation history. It shows how user confirmation works when the agent has access to previous conversation context. - -## Code - -```python confirmation_required_with_history.py -"""🤝 Human-in-the-Loop: Adding User Confirmation to Tool Calls - -This example shows how to implement human-in-the-loop functionality in your Agno tools. -It shows how to: -- Handle user confirmation during tool execution -- Gracefully cancel operations based on user choice - -Some practical applications: -- Confirming sensitive operations before execution -- Reviewing API calls before they're made -- Validating data transformations -- Approving automated actions in critical systems - -Run `pip install openai httpx rich agno` to install dependencies. 
-""" - -import json - -import httpx -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.utils import pprint -from rich.console import Console -from rich.prompt import Prompt - -console = Console() - - -@tool(requires_confirmation=True) -def get_top_hackernews_stories(num_stories: int) -> str: - """Fetch top stories from Hacker News. - - Args: - num_stories (int): Number of stories to retrieve - - Returns: - str: JSON string containing story details - """ - # Fetch top story IDs - response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") - story_ids = response.json() - - # Yield story details - all_stories = [] - for story_id in story_ids[:num_stories]: - story_response = httpx.get( - f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" - ) - story = story_response.json() - if "text" in story: - story.pop("text", None) - all_stories.append(story) - return json.dumps(all_stories) - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[get_top_hackernews_stories], - add_history_to_context=True, - num_history_runs=2, - markdown=True, -) - -agent.run("What can you do?") - -run_response = agent.run("Fetch the top 2 hackernews stories.") -if run_response.is_paused: - for tool in run_response.tools_requiring_confirmation: # type: ignore - # Ask for confirmation - console.print( - f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." - ) - message = ( - Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") - .strip() - .lower() - ) - - if message == "n": - tool.confirmed = False - else: - # We update the tools in place - tool.confirmed = True - -run_response = agent.continue_run(run_response=run_response) -pprint.pprint_run_response(run_response) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai httpx rich - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch confirmation_required_with_history.py - ``` - - - - - ```bash Mac - python confirmation_required_with_history.py - ``` - - ```bash Windows - python confirmation_required_with_history.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/confirmation_required_with_run_id.mdx b/examples/basics/agent/human_in_the_loop/confirmation_required_with_run_id.mdx deleted file mode 100644 index 6982c831..00000000 --- a/examples/basics/agent/human_in_the_loop/confirmation_required_with_run_id.mdx +++ /dev/null @@ -1,154 +0,0 @@ ---- -title: Confirmation Required with Run ID ---- - -This example demonstrates human-in-the-loop functionality using specific run IDs for session management. It shows how to continue agent execution with updated tools using run identifiers. - -## Code - -```python confirmation_required_with_run_id.py -"""🤝 Human-in-the-Loop: Adding User Confirmation to Tool Calls - -This example shows how to implement human-in-the-loop functionality in your Agno tools. 
-It shows how to: -- Handle user confirmation during tool execution -- Gracefully cancel operations based on user choice - -Some practical applications: -- Confirming sensitive operations before execution -- Reviewing API calls before they're made -- Validating data transformations -- Approving automated actions in critical systems - -Run `pip install openai httpx rich agno` to install dependencies. -""" - -import json - -import httpx -from agno.agent import Agent -from agno.db.sqlite import SqliteDb -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.utils import pprint -from rich.console import Console -from rich.prompt import Prompt - -console = Console() - - -@tool(requires_confirmation=True) -def get_top_hackernews_stories(num_stories: int) -> str: - """Fetch top stories from Hacker News. - - Args: - num_stories (int): Number of stories to retrieve - - Returns: - str: JSON string containing story details - """ - # Fetch top story IDs - response = httpx.get("https://hacker-news.firebaseio.com/v0/topstories.json") - story_ids = response.json() - - # Yield story details - all_stories = [] - for story_id in story_ids[:num_stories]: - story_response = httpx.get( - f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json" - ) - story = story_response.json() - if "text" in story: - story.pop("text", None) - all_stories.append(story) - return json.dumps(all_stories) - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[get_top_hackernews_stories], - markdown=True, - db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), -) - -run_response = agent.run("Fetch the top 2 hackernews stories.") -if run_response.is_paused: - for tool in run_response.tools_requiring_confirmation: # type: ignore - # Ask for confirmation - console.print( - f"Tool name [bold blue]{tool.tool_name}({tool.tool_args})[/] requires confirmation." - ) - message = ( - Prompt.ask("Do you want to continue?", choices=["y", "n"], default="y") - .strip() - .lower() - ) - - if message == "n": - tool.confirmed = False - else: - # We update the tools in place - tool.confirmed = True - -updated_tools = run_response.tools - -run_response = agent.continue_run( - run_id=run_response.run_id, - updated_tools=updated_tools, -) - -pprint.pprint_run_response(run_response) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai httpx rich - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch confirmation_required_with_run_id.py - ``` - - - - - ```bash Mac - python confirmation_required_with_run_id.py - ``` - - ```bash Windows - python confirmation_required_with_run_id.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/external_tool_execution.mdx b/examples/basics/agent/human_in_the_loop/external_tool_execution.mdx deleted file mode 100644 index 8a690450..00000000 --- a/examples/basics/agent/human_in_the_loop/external_tool_execution.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -title: External Tool Execution ---- - -This example demonstrates how to execute tools outside of the agent using external tool execution. 
This pattern allows you to control tool execution externally while maintaining agent functionality. - -## Code - -```python external_tool_execution.py -"""🤝 Human-in-the-Loop: Execute a tool call outside of the agent - -This example shows how to implement human-in-the-loop functionality in your Agno tools. -It shows how to: -- Use external tool execution to execute a tool call outside of the agent - -Run `pip install openai agno` to install dependencies. -""" - -import subprocess - -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.utils import pprint - - -# We have to create a tool with the correct name, arguments and docstring for the agent to know what to call. -@tool(external_execution=True) -def execute_shell_command(command: str) -> str: - """Execute a shell command. - - Args: - command (str): The shell command to execute - - Returns: - str: The output of the shell command - """ - if command.startswith("ls"): - return subprocess.check_output(command, shell=True).decode("utf-8") - else: - raise Exception(f"Unsupported command: {command}") - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[execute_shell_command], - markdown=True, -) - -run_response = agent.run("What files do I have in my current directory?") -if run_response.is_paused: - for tool in run_response.tools_awaiting_external_execution: - if tool.tool_name == execute_shell_command.name: - print(f"Executing {tool.tool_name} with args {tool.tool_args} externally") - # We execute the tool ourselves. You can also execute something completely external here. - result = execute_shell_command.entrypoint(**tool.tool_args) # type: ignore - # We have to set the result on the tool execution object so that the agent can continue - tool.result = result - - run_response = agent.continue_run(run_response=run_response) - pprint.pprint_run_response(run_response) - - -# Or for simple debug flow -# agent.print_response("What files do I have in my current directory?") -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch external_tool_execution.py - ``` - - - - - ```bash Mac - python external_tool_execution.py - ``` - - ```bash Windows - python external_tool_execution.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/external_tool_execution_async.mdx b/examples/basics/agent/human_in_the_loop/external_tool_execution_async.mdx deleted file mode 100644 index b43787f2..00000000 --- a/examples/basics/agent/human_in_the_loop/external_tool_execution_async.mdx +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: External Tool Execution Async ---- - -This example demonstrates how to execute tools outside of the agent using external tool execution in an asynchronous environment. This pattern allows you to control tool execution externally while maintaining agent functionality with async operations. - -## Code - -```python external_tool_execution_async.py -"""🤝 Human-in-the-Loop: Execute a tool call outside of the agent - -This example shows how to implement human-in-the-loop functionality in your Agno tools. 
-It shows how to: -- Use external tool execution to execute a tool call outside of the agent - -Run `pip install openai agno` to install dependencies. -""" - -import asyncio -import subprocess - -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.utils import pprint - - -# We have to create a tool with the correct name, arguments and docstring for the agent to know what to call. -@tool(external_execution=True) -def execute_shell_command(command: str) -> str: - """Execute a shell command. - - Args: - command (str): The shell command to execute - - Returns: - str: The output of the shell command - """ - if command.startswith("ls"): - return subprocess.check_output(command, shell=True).decode("utf-8") - else: - raise Exception(f"Unsupported command: {command}") - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[execute_shell_command], - markdown=True, -) - -run_response = asyncio.run(agent.arun("What files do I have in my current directory?")) -if run_response.is_paused: - for tool in run_response.tools_awaiting_external_execution: - if tool.tool_name == execute_shell_command.name: - print(f"Executing {tool.tool_name} with args {tool.tool_args} externally") - # We execute the tool ourselves. You can also execute something completely external here. - result = execute_shell_command.entrypoint(**tool.tool_args) # type: ignore - # We have to set the result on the tool execution object so that the agent can continue - tool.result = result - - run_response = asyncio.run(agent.acontinue_run(run_response=run_response)) - pprint.pprint_run_response(run_response) - - -# Or for simple debug flow -# agent.print_response("What files do I have in my current directory?") -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch external_tool_execution_async.py - ``` - - - - - ```bash Mac - python external_tool_execution_async.py - ``` - - ```bash Windows - python external_tool_execution_async.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/external_tool_execution_async_responses.mdx b/examples/basics/agent/human_in_the_loop/external_tool_execution_async_responses.mdx deleted file mode 100644 index 6fe2736f..00000000 --- a/examples/basics/agent/human_in_the_loop/external_tool_execution_async_responses.mdx +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: External Tool Execution Async Responses ---- - -This example demonstrates external tool execution using OpenAI Responses API with gpt-4.1-mini model. It shows how to handle tool-call IDs and execute multiple external tools in a loop until completion. - -## Code - -```python external_tool_execution_async_responses.py -"""🤝 Human-in-the-Loop with OpenAI Responses API (gpt-4.1-mini) - -This example mirrors the external tool execution async example but uses -OpenAIResponses with gpt-4.1-mini to validate tool-call id handling. - -Run `pip install openai agno` to install dependencies. 
-""" - -import asyncio -import subprocess - -from agno.agent import Agent -from agno.models.openai import OpenAIResponses -from agno.tools import tool -from agno.utils import pprint - - -# We have to create a tool with the correct name, arguments and docstring -# for the agent to know what to call. -@tool(external_execution=True) -def execute_shell_command(command: str) -> str: - """Execute a shell command. - - Args: - command (str): The shell command to execute - - Returns: - str: The output of the shell command - """ - if ( - command.startswith("ls ") - or command == "ls" - or command.startswith("cat ") - or command.startswith("head ") - ): - return subprocess.check_output(command, shell=True).decode("utf-8") - raise Exception(f"Unsupported command: {command}") - - -agent = Agent( - model=OpenAIResponses(id="gpt-4.1-mini"), - tools=[execute_shell_command], - markdown=True, -) - -run_response = asyncio.run(agent.arun("What files do I have in my current directory?")) - -# Keep executing externally-required tools until the run completes -while ( - run_response.is_paused and len(run_response.tools_awaiting_external_execution) > 0 -): - for external_tool in run_response.tools_awaiting_external_execution: - if external_tool.tool_name == execute_shell_command.name: - print( - f"Executing {external_tool.tool_name} with args {external_tool.tool_args} externally" - ) - result = execute_shell_command.entrypoint(**external_tool.tool_args) - external_tool.result = result - else: - print(f"Skipping unsupported external tool: {external_tool.tool_name}") - - run_response = asyncio.run(agent.acontinue_run(run_response=run_response)) - -pprint.pprint_run_response(run_response) - - -# Or for simple debug flow -# agent.print_response("What files do I have in my current directory?") -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch external_tool_execution_async_responses.py - ``` - - - - - ```bash Mac - python external_tool_execution_async_responses.py - ``` - - ```bash Windows - python external_tool_execution_async_responses.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/external_tool_execution_stream.mdx b/examples/basics/agent/human_in_the_loop/external_tool_execution_stream.mdx deleted file mode 100644 index f712b4ee..00000000 --- a/examples/basics/agent/human_in_the_loop/external_tool_execution_stream.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: External Tool Execution Stream ---- - -This example demonstrates how to execute tools outside of the agent using external tool execution with streaming responses. It shows how to handle external tool execution while maintaining real-time streaming capabilities. - -## Code - -```python external_tool_execution_stream.py -"""🤝 Human-in-the-Loop: Execute a tool call outside of the agent - -This example shows how to implement human-in-the-loop functionality in your Agno tools. -It shows how to: -- Use external tool execution to execute a tool call outside of the agent - -Run `pip install openai agno` to install dependencies. 
-""" - -import subprocess - -from agno.agent import Agent -from agno.db.sqlite import SqliteDb -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.utils import pprint - - -# We have to create a tool with the correct name, arguments and docstring for the agent to know what to call. -@tool(external_execution=True) -def execute_shell_command(command: str) -> str: - """Execute a shell command. - - Args: - command (str): The shell command to execute - - Returns: - str: The output of the shell command - """ - if command.startswith("ls"): - return subprocess.check_output(command, shell=True).decode("utf-8") - else: - raise Exception(f"Unsupported command: {command}") - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[execute_shell_command], - markdown=True, - db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), -) - -for run_event in agent.run( - "What files do I have in my current directory?", stream=True -): - if run_event.is_paused: - for tool in run_event.tools_awaiting_external_execution: # type: ignore - if tool.tool_name == execute_shell_command.name: - print( - f"Executing {tool.tool_name} with args {tool.tool_args} externally" - ) - # We execute the tool ourselves. You can also execute something completely external here. - result = execute_shell_command.entrypoint(**tool.tool_args) # type: ignore - # We have to set the result on the tool execution object so that the agent can continue - tool.result = result - - run_response = agent.continue_run( - run_id=run_event.run_id, updated_tools=run_event.tools, stream=True - ) # type: ignore - pprint.pprint_run_response(run_response) - - -# Or for simple debug flow -# agent.print_response("What files do I have in my current directory?", stream=True) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch external_tool_execution_stream.py - ``` - - - - - ```bash Mac - python external_tool_execution_stream.py - ``` - - ```bash Windows - python external_tool_execution_stream.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/external_tool_execution_stream_async.mdx b/examples/basics/agent/human_in_the_loop/external_tool_execution_stream_async.mdx deleted file mode 100644 index 004c33d7..00000000 --- a/examples/basics/agent/human_in_the_loop/external_tool_execution_stream_async.mdx +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: External Tool Execution Stream Async ---- - -This example demonstrates how to execute tools outside of the agent using external tool execution with async streaming responses. It shows how to handle external tool execution in an asynchronous environment while maintaining real-time streaming. - -## Code - -```python external_tool_execution_stream_async.py -"""🤝 Human-in-the-Loop: Execute a tool call outside of the agent - -This example shows how to implement human-in-the-loop functionality in your Agno tools. -It shows how to: -- Use external tool execution to execute a tool call outside of the agent - -Run `pip install openai agno` to install dependencies. 
-""" - -import asyncio -import subprocess - -from agno.agent import Agent -from agno.db.sqlite import SqliteDb -from agno.models.openai import OpenAIChat -from agno.tools import tool - - -# We have to create a tool with the correct name, arguments and docstring for the agent to know what to call. -@tool(external_execution=True) -def execute_shell_command(command: str) -> str: - """Execute a shell command. - - Args: - command (str): The shell command to execute - - Returns: - str: The output of the shell command - """ - if command.startswith("ls"): - return subprocess.check_output(command, shell=True).decode("utf-8") - else: - raise Exception(f"Unsupported command: {command}") - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[execute_shell_command], - markdown=True, - db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), -) - - -async def main(): - async for run_event in agent.arun( - "What files do I have in my current directory?", stream=True - ): - if run_event.is_paused: - for tool in run_event.tools_awaiting_external_execution: # type: ignore - if tool.tool_name == execute_shell_command.name: - print( - f"Executing {tool.tool_name} with args {tool.tool_args} externally" - ) - # We execute the tool ourselves. You can also execute something completely external here. - result = execute_shell_command.entrypoint(**tool.tool_args) # type: ignore - # We have to set the result on the tool execution object so that the agent can continue - tool.result = result - - async for resp in agent.acontinue_run( # type: ignore - run_id=run_event.run_id, - updated_tools=run_event.tools, - stream=True, - ): - print(resp.content, end="") - else: - print(run_event.content, end="") - - # Or for simple debug flow - # agent.print_response("What files do I have in my current directory?", stream=True) - - -if __name__ == "__main__": - asyncio.run(main()) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch external_tool_execution_stream_async.py - ``` - - - - - ```bash Mac - python external_tool_execution_stream_async.py - ``` - - ```bash Windows - python external_tool_execution_stream_async.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/external_tool_execution_toolkit.mdx b/examples/basics/agent/human_in_the_loop/external_tool_execution_toolkit.mdx deleted file mode 100644 index 7d779e2a..00000000 --- a/examples/basics/agent/human_in_the_loop/external_tool_execution_toolkit.mdx +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: External Tool Execution Toolkit ---- - -This example demonstrates how to execute toolkit-based tools outside of the agent using external tool execution. It shows how to create a custom toolkit with tools that require external execution. - -## Code - -```python external_tool_execution_toolkit.py -"""🤝 Human-in-the-Loop: Execute a tool call outside of the agent - -This example shows how to implement human-in-the-loop functionality in your Agno tools. -It shows how to: -- Use external tool execution to execute a tool call outside of the agent - -Run `pip install openai agno` to install dependencies. 
-""" - -import subprocess - -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.tools.toolkit import Toolkit -from agno.utils import pprint - - -class ShellTools(Toolkit): - def __init__(self, *args, **kwargs): - super().__init__( - tools=[self.list_dir], - external_execution_required_tools=["list_dir"], - *args, - **kwargs, - ) - - def list_dir(self, directory: str): - """ - Lists the contents of a directory. - - Args: - directory: The directory to list. - - Returns: - A string containing the contents of the directory. - """ - return subprocess.check_output(f"ls {directory}", shell=True).decode("utf-8") - - -tools = ShellTools() - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[tools], - markdown=True, -) - -run_response = agent.run("What files do I have in my current directory?") -if run_response.is_paused: - for tool in run_response.tools_awaiting_external_execution: - if tool.tool_name == "list_dir": - print(f"Executing {tool.tool_name} with args {tool.tool_args} externally") - # We execute the tool ourselves. You can also execute something completely external here. - result = tools.list_dir(**tool.tool_args) # type: ignore - # We have to set the result on the tool execution object so that the agent can continue - tool.result = result - - run_response = agent.continue_run(run_response=run_response) - pprint.pprint_run_response(run_response) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch external_tool_execution_toolkit.py - ``` - - - - - ```bash Mac - python external_tool_execution_toolkit.py - ``` - - ```bash Windows - python external_tool_execution_toolkit.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/user_input_required.mdx b/examples/basics/agent/human_in_the_loop/user_input_required.mdx deleted file mode 100644 index d671a142..00000000 --- a/examples/basics/agent/human_in_the_loop/user_input_required.mdx +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: User Input Required for Tool Execution ---- - -This example demonstrates how to create tools that require user input before execution, allowing for dynamic data collection during agent runs. - -## Code - -```python user_input_required.py -"""🤝 Human-in-the-Loop: Allowing users to provide input externally - -This example shows how to use the `requires_user_input` parameter to allow users to provide input externally. -""" - -from typing import List - -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.tools.function import UserInputField -from agno.utils import pprint - - -# You can either specify the user_input_fields leave empty for all fields to be provided by the user -@tool(requires_user_input=True, user_input_fields=["to_address"]) -def send_email(subject: str, body: str, to_address: str) -> str: - """ - Send an email. - - Args: - subject (str): The subject of the email. - body (str): The body of the email. - to_address (str): The address to send the email to. 
- """ - return f"Sent email to {to_address} with subject {subject} and body {body}" - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[send_email], - markdown=True, -) - -run_response = agent.run( - "Send an email with the subject 'Hello' and the body 'Hello, world!'" -) -if run_response.is_paused: - for tool in run_response.tools_requiring_user_input: # type: ignore - input_schema: List[UserInputField] = tool.user_input_schema # type: ignore - - for field in input_schema: - # Get user input for each field in the schema - field_type = field.field_type - field_description = field.description - - # Display field information to the user - print(f"\nField: {field.name}") - print(f"Description: {field_description}") - print(f"Type: {field_type}") - - # Get user input - if field.value is None: - user_value = input(f"Please enter a value for {field.name}: ") - else: - print(f"Value: {field.value}") - user_value = field.value - - # Update the field value - field.value = user_value - - run_response = agent.continue_run( - run_response=run_response - ) # or agent.continue_run(run_response=run_response) - pprint.pprint_run_response(run_response) - -# Or for simple debug flow -# agent.print_response("Send an email with the subject 'Hello' and the body 'Hello, world!'") -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch user_input_required.py - ``` - - - - - ```bash Mac - python user_input_required.py - ``` - - ```bash Windows - python user_input_required.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/user_input_required_all_fields.mdx b/examples/basics/agent/human_in_the_loop/user_input_required_all_fields.mdx deleted file mode 100644 index 37af60b1..00000000 --- a/examples/basics/agent/human_in_the_loop/user_input_required_all_fields.mdx +++ /dev/null @@ -1,123 +0,0 @@ ---- -title: User Input Required All Fields ---- - -This example demonstrates how to use the `requires_user_input` parameter to collect input for all fields in a tool. It shows how to handle user input schema and collect values for each required field. - -## Code - -```python user_input_required_all_fields.py -"""🤝 Human-in-the-Loop: Allowing users to provide input externally - -This example shows how to use the `requires_user_input` parameter to allow users to provide input externally. -""" - -from typing import List - -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.tools.function import UserInputField -from agno.utils import pprint - - -@tool(requires_user_input=True) -def send_email(subject: str, body: str, to_address: str) -> str: - """ - Send an email. - - Args: - subject (str): The subject of the email. - body (str): The body of the email. - to_address (str): The address to send the email to. 
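Interactive `input()` prompts are only one way to fill the schema. If your application already knows the answers, you can populate the fields programmatically. A minimal sketch, assuming the `agent` and `send_email` tool defined in the example below and a hypothetical `known_values` mapping:

```python
# Hypothetical values supplied by your application instead of an interactive prompt.
known_values = {
    "subject": "Hello",
    "body": "Hello, world!",
    "to_address": "user@example.com",
}

run_response = agent.run("Send an email please")
if run_response.is_paused:
    for tool in run_response.tools_requiring_user_input:
        for field in tool.user_input_schema:
            if field.value is None:
                # Raises KeyError if the schema asks for a field we did not anticipate.
                field.value = known_values[field.name]
    run_response = agent.continue_run(run_response=run_response)
```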
- """ - return f"Sent email to {to_address} with subject {subject} and body {body}" - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[send_email], - markdown=True, -) - -run_response = agent.run("Send an email please") -if run_response.is_paused: # Or agent.run_response.is_paused - for tool in run_response.tools_requiring_user_input: # type: ignore - input_schema: List[UserInputField] = tool.user_input_schema # type: ignore - - for field in input_schema: - # Get user input for each field in the schema - field_type = field.field_type - field_description = field.description - - # Display field information to the user - print(f"\nField: {field.name}") - print(f"Description: {field_description}") - print(f"Type: {field_type}") - - # Get user input - if field.value is None: - user_value = input(f"Please enter a value for {field.name}: ") - - # Update the field value - field.value = user_value - - run_response = agent.continue_run(run_response=run_response) - pprint.pprint_run_response(run_response) - -# Or for simple debug flow -# agent.print_response("Send an email please") -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch user_input_required_all_fields.py - ``` - - - - - ```bash Mac - python user_input_required_all_fields.py - ``` - - ```bash Windows - python user_input_required_all_fields.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/user_input_required_async.mdx b/examples/basics/agent/human_in_the_loop/user_input_required_async.mdx deleted file mode 100644 index 4f633aeb..00000000 --- a/examples/basics/agent/human_in_the_loop/user_input_required_async.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: User Input Required Async ---- - -This example demonstrates how to use the `requires_user_input` parameter with asynchronous operations. It shows how to collect specific user input fields in an async environment. - -## Code - -```python user_input_required_async.py -"""🤝 Human-in-the-Loop: Allowing users to provide input externally""" - -import asyncio -from typing import List - -from agno.agent import Agent -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.tools.function import UserInputField -from agno.utils import pprint - - -# You can either specify the user_input_fields leave empty for all fields to be provided by the user -@tool(requires_user_input=True, user_input_fields=["to_address"]) -def send_email(subject: str, body: str, to_address: str) -> str: - """ - Send an email. - - Args: - subject (str): The subject of the email. - body (str): The body of the email. - to_address (str): The address to send the email to. 
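In an async application, a blocking `input()` call can stall the event loop while waiting for the user. A minimal sketch that collects the missing fields in a worker thread via `asyncio.to_thread`, assuming the `agent` and `send_email` tool defined in the example below:

```python
import asyncio


async def collect_input_and_continue():
    run_response = await agent.arun(
        "Send an email with the subject 'Hello' and the body 'Hello, world!'"
    )
    if run_response.is_paused:
        for tool in run_response.tools_requiring_user_input:
            for field in tool.user_input_schema:
                if field.value is None:
                    # Run the blocking input() call in a worker thread so the
                    # event loop stays responsive.
                    field.value = await asyncio.to_thread(
                        input, f"Please enter a value for {field.name}: "
                    )
        run_response = await agent.acontinue_run(run_response=run_response)
    return run_response


# asyncio.run(collect_input_and_continue())
```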
- """ - return f"Sent email to {to_address} with subject {subject} and body {body}" - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[send_email], - markdown=True, -) - -run_response = asyncio.run( - agent.arun("Send an email with the subject 'Hello' and the body 'Hello, world!'") -) -if run_response.is_paused: # Or agent.run_response.is_paused - for tool in run_response.tools_requiring_user_input: # type: ignore - input_schema: List[UserInputField] = tool.user_input_schema # type: ignore - - for field in input_schema: - # Get user input for each field in the schema - field_type = field.field_type - field_description = field.description - - # Display field information to the user - print(f"\nField: {field.name}") - print(f"Description: {field_description}") - print(f"Type: {field_type}") - - # Get user input - if field.value is None: - user_value = input(f"Please enter a value for {field.name}: ") - else: - print(f"Value: {field.value}") - user_value = field.value - - # Update the field value - field.value = user_value - - run_response = asyncio.run(agent.acontinue_run(run_response=run_response)) - pprint.pprint_run_response(run_response) - -# Or for simple debug flow -# agent.print_response("Send an email with the subject 'Hello' and the body 'Hello, world!'") -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch user_input_required_async.py - ``` - - - - - ```bash Mac - python user_input_required_async.py - ``` - - ```bash Windows - python user_input_required_async.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/user_input_required_stream.mdx b/examples/basics/agent/human_in_the_loop/user_input_required_stream.mdx deleted file mode 100644 index 0a536112..00000000 --- a/examples/basics/agent/human_in_the_loop/user_input_required_stream.mdx +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: User Input Required Stream ---- - -This example demonstrates how to use the `requires_user_input` parameter with streaming responses. It shows how to collect specific user input fields while maintaining real-time streaming capabilities. - -## Code - -```python user_input_required_stream.py -"""🤝 Human-in-the-Loop: Allowing users to provide input externally - -This example shows how to use the `requires_user_input` parameter to allow users to provide input externally. -""" - -from typing import List - -from agno.agent import Agent -from agno.db.sqlite import SqliteDb -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.tools.function import UserInputField -from agno.utils import pprint - - -# You can either specify the user_input_fields leave empty for all fields to be provided by the user -@tool(requires_user_input=True, user_input_fields=["to_address"]) -def send_email(subject: str, body: str, to_address: str) -> str: - """ - Send an email. - - Args: - subject (str): The subject of the email. - body (str): The body of the email. - to_address (str): The address to send the email to. 
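The streaming example below focuses on the paused branch. If you also want to surface content that streams before the pause, you can add an `else` branch, as in this sketch (assuming the `agent`, `send_email` tool, and `pprint` import from the example below):

```python
for run_event in agent.run(
    "Send an email with the subject 'Hello' and the body 'Hello, world!'", stream=True
):
    if run_event.is_paused:
        for tool in run_event.tools_requiring_user_input:
            for field in tool.user_input_schema:
                if field.value is None:
                    field.value = input(f"Please enter a value for {field.name}: ")
        run_response = agent.continue_run(
            run_id=run_event.run_id, updated_tools=run_event.tools
        )
        pprint.pprint_run_response(run_response)
    else:
        # Print streamed content that arrives before the pause.
        print(run_event.content, end="")
```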
- """ - return f"Sent email to {to_address} with subject {subject} and body {body}" - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[send_email], - markdown=True, - db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), -) - -for run_event in agent.run( - "Send an email with the subject 'Hello' and the body 'Hello, world!'", stream=True -): - if run_event.is_paused: # Or agent.run_response.is_paused - for tool in run_event.tools_requiring_user_input: # type: ignore - input_schema: List[UserInputField] = tool.user_input_schema # type: ignore - - for field in input_schema: - # Get user input for each field in the schema - field_type = field.field_type - field_description = field.description - - # Display field information to the user - print(f"\nField: {field.name}") - print(f"Description: {field_description}") - print(f"Type: {field_type}") - - # Get user input - if field.value is None: - user_value = input(f"Please enter a value for {field.name}: ") - else: - print(f"Value: {field.value}") - user_value = field.value - - # Update the field value - field.value = user_value - - run_response = agent.continue_run( - run_id=run_event.run_id, updated_tools=run_event.tools - ) - pprint.pprint_run_response(run_response) - -# Or for simple debug flow -# agent.print_response("Send an email with the subject 'Hello' and the body 'Hello, world!'", stream=True) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch user_input_required_stream.py - ``` - - - - - ```bash Mac - python user_input_required_stream.py - ``` - - ```bash Windows - python user_input_required_stream.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file diff --git a/examples/basics/agent/human_in_the_loop/user_input_required_stream_async.mdx b/examples/basics/agent/human_in_the_loop/user_input_required_stream_async.mdx deleted file mode 100644 index b40ecebe..00000000 --- a/examples/basics/agent/human_in_the_loop/user_input_required_stream_async.mdx +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: User Input Required Stream Async ---- - -This example demonstrates how to use the `requires_user_input` parameter with async streaming responses. It shows how to collect specific user input fields in an asynchronous environment while maintaining real-time streaming. - -## Code - -```python user_input_required_stream_async.py -"""🤝 Human-in-the-Loop: Allowing users to provide input externally - -This example shows how to use the `requires_user_input` parameter to allow users to provide input externally. -""" - -import asyncio -from typing import List - -from agno.agent import Agent -from agno.db.sqlite import SqliteDb -from agno.models.openai import OpenAIChat -from agno.tools import tool -from agno.tools.function import UserInputField - - -# You can either specify the user_input_fields leave empty for all fields to be provided by the user -@tool(requires_user_input=True, user_input_fields=["to_address"]) -def send_email(subject: str, body: str, to_address: str) -> str: - """ - Send an email. - - Args: - subject (str): The subject of the email. - body (str): The body of the email. 
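Stripped of the explanatory prints, the async streaming flow reduces to the loop below; this is a condensed sketch assuming the `agent` and `send_email` tool defined in the full example that follows.

```python
import asyncio


async def main():
    async for run_event in agent.arun(
        "Send an email with the subject 'Hello' and the body 'Hello, world!'",
        stream=True,
    ):
        if run_event.is_paused:
            for tool in run_event.tools_requiring_user_input:
                for field in tool.user_input_schema:
                    if field.value is None:
                        field.value = input(f"Please enter a value for {field.name}: ")
            # Resume the paused run and stream the rest of the response.
            async for resp in agent.acontinue_run(
                run_id=run_event.run_id, updated_tools=run_event.tools, stream=True
            ):
                print(resp.content, end="")
        else:
            print(run_event.content, end="")


# asyncio.run(main())
```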
- to_address (str): The address to send the email to. - """ - return f"Sent email to {to_address} with subject {subject} and body {body}" - - -agent = Agent( - model=OpenAIChat(id="gpt-5-mini"), - tools=[send_email], - markdown=True, - db=SqliteDb(session_table="test_session", db_file="tmp/example.db"), -) - - -async def main(): - async for run_event in agent.arun( - "Send an email with the subject 'Hello' and the body 'Hello, world!'", - stream=True, - ): - if run_event.is_paused: # Or agent.run_response.is_paused - for tool in run_event.tools_requiring_user_input: # type: ignore - input_schema: List[UserInputField] = tool.user_input_schema # type: ignore - - for field in input_schema: - # Get user input for each field in the schema - field_type = field.field_type - field_description = field.description - - # Display field information to the user - print(f"\nField: {field.name}") - print(f"Description: {field_description}") - print(f"Type: {field_type}") - - # Get user input - if field.value is None: - user_value = input(f"Please enter a value for {field.name}: ") - else: - print(f"Value: {field.value}") - user_value = field.value - - # Update the field value - field.value = user_value - - async for resp in agent.acontinue_run( # type: ignore - run_id=run_event.run_id, - updated_tools=run_event.tools, - stream=True, - ): - print(resp.content, end="") - - # Or for simple debug flow - # agent.aprint_response("Send an email with the subject 'Hello' and the body 'Hello, world!'") - - -if __name__ == "__main__": - asyncio.run(main()) -``` - -## Usage - - - - - - ```bash - pip install -U agno openai - ``` - - - - - - - ```bash Mac/Linux - export OPENAI_API_KEY="your_openai_api_key_here" - ``` - - ```bash Windows - $Env:OPENAI_API_KEY="your_openai_api_key_here" - ``` - - - - - Create a Python file and add the above code. - ```bash - touch user_input_required_stream_async.py - ``` - - - - - ```bash Mac - python user_input_required_stream_async.py - ``` - - ```bash Windows - python user_input_required_stream_async.py - ``` - - - - - Explore all the available cookbooks in the Agno repository. Click the link below to view the code on GitHub: - - - Agno Cookbooks on GitHub - - - \ No newline at end of file