Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,21 @@ public static async Task RunAgentAsync(AIAgent agent, string title, string userP

private static async Task StreamAgentResponseAsync(AIAgent agent, AgentSession session, AgentModeProvider? modeProvider, string userInput, int? maxContextWindowTokens, int? maxOutputTokens)
{
    // Stream the agent's answer to the initial user input, gathering any tool
    // approval requests raised while streaming.
    var pending = await StreamAndCollectApprovalsAsync(agent.RunStreamingAsync(userInput, session), modeProvider, session, maxContextWindowTokens, maxOutputTokens);

    // Each round of user approval decisions may produce follow-up messages for the
    // agent; keep streaming rounds until a round yields nothing more to send back.
    for (var followUp = PromptForApprovals(pending); followUp is not null; followUp = PromptForApprovals(pending))
    {
        pending = await StreamAndCollectApprovalsAsync(agent.RunStreamingAsync(followUp, session), modeProvider, session, maxContextWindowTokens, maxOutputTokens);
    }
}

private static async Task<List<ToolApprovalRequestContent>> StreamAndCollectApprovalsAsync(IAsyncEnumerable<AgentResponseUpdate> updates, AgentModeProvider? modeProvider, AgentSession session, int? maxContextWindowTokens, int? maxOutputTokens)
{
var approvalRequests = new List<ToolApprovalRequestContent>();
string mode = modeProvider?.GetMode(session) ?? "unknown";
System.Console.ForegroundColor = GetModeColor(mode);
System.Console.Write($"\n[{mode}] Agent: ");
Expand All @@ -72,7 +87,7 @@ private static async Task StreamAgentResponseAsync(AIAgent agent, AgentSession s

try
{
await foreach (var update in agent.RunStreamingAsync(userInput, session))
await foreach (var update in updates)
{
foreach (var content in update.Contents)
{
Expand All @@ -96,6 +111,17 @@ private static async Task StreamAgentResponseAsync(AIAgent agent, AgentSession s
hasTextOutput = false;
spinner.Start();
}
else if (content is ToolApprovalRequestContent approvalRequest)
{
await spinner.StopAsync();
approvalRequests.Add(approvalRequest);
string toolName = approvalRequest.ToolCall is FunctionCallContent fc ? ToolCallFormatter.Format(fc) : approvalRequest.ToolCall?.ToString() ?? "unknown";
System.Console.ForegroundColor = ConsoleColor.Yellow;
System.Console.Write(hasTextOutput ? "\n\n ⚠️ Approval needed: " : "\n ⚠️ Approval needed: ");
System.Console.Write(toolName);
System.Console.ForegroundColor = GetModeColor(mode);
hasTextOutput = false;
}
else if (content is ErrorContent errorContent)
{
await spinner.StopAsync();
Expand Down Expand Up @@ -174,7 +200,7 @@ private static async Task StreamAgentResponseAsync(AIAgent agent, AgentSession s

await spinner.StopAsync();

if (!hasReceivedAnyText)
if (!hasReceivedAnyText && approvalRequests.Count == 0)
{
System.Console.ForegroundColor = ConsoleColor.DarkYellow;
System.Console.Write("\n (no text response from agent)");
Expand All @@ -183,6 +209,59 @@ private static async Task StreamAgentResponseAsync(AIAgent agent, AgentSession s
System.Console.ResetColor();
System.Console.WriteLine();
System.Console.WriteLine();

return approvalRequests;
}

/// <summary>
/// Prompts the user to decide on each pending tool approval request.
/// </summary>
/// <param name="approvalRequests">The approval requests collected while streaming the agent's response.</param>
/// <returns>
/// A single user message containing one approval response per request, to send back to the agent,
/// or <see langword="null"/> when there are no requests and the turn is complete.
/// </returns>
private static List<ChatMessage>? PromptForApprovals(List<ToolApprovalRequestContent> approvalRequests)
{
    if (approvalRequests.Count == 0)
    {
        return null;
    }

    var responses = new List<AIContent>();
    foreach (var request in approvalRequests)
    {
        string toolName = request.ToolCall is FunctionCallContent fc ? ToolCallFormatter.Format(fc) : request.ToolCall?.ToString() ?? "unknown";

        System.Console.ForegroundColor = ConsoleColor.Yellow;
        System.Console.WriteLine($"\n 🔐 Tool approval required: {toolName}");
        System.Console.ResetColor();
        System.Console.WriteLine(" 1) Approve this call");
        System.Console.WriteLine(" 2) Always approve this tool (any arguments)");
        System.Console.WriteLine(" 3) Always approve this tool with these arguments");
        System.Console.WriteLine(" 4) Deny");

        // An approval gate should fail closed: re-prompt on unrecognized input rather than
        // silently approving on a typo. Empty input (or EOF from a closed stdin, where
        // ReadLine returns null) keeps the original one-time-approve default.
        string? choice;
        while (true)
        {
            System.Console.Write(" Choice [1-4]: ");
            choice = System.Console.ReadLine()?.Trim();
            if (choice is null or "" or "1" or "2" or "3" or "4")
            {
                break;
            }

            System.Console.WriteLine(" Unrecognized choice; please enter a number from 1 to 4.");
        }

        AIContent response = choice switch
        {
            "2" => request.CreateAlwaysApproveToolResponse("User chose to always approve this tool"),
            "3" => request.CreateAlwaysApproveToolWithArgumentsResponse("User chose to always approve this tool with these arguments"),
            "4" => request.CreateResponse(approved: false, reason: "User denied"),
            _ => request.CreateResponse(approved: true, reason: "User approved"),
        };

        // Echo the decision so the transcript shows what was chosen.
        string action = choice switch
        {
            "2" => "✅ Always approved (any args)",
            "3" => "✅ Always approved (these args)",
            "4" => "❌ Denied",
            _ => "✅ Approved",
        };
        System.Console.ForegroundColor = ConsoleColor.DarkGray;
        System.Console.WriteLine($" {action}");
        System.Console.ResetColor();

        responses.Add(response);
    }

    // All responses travel back to the agent as a single user-role message.
    return [new ChatMessage(ChatRole.User, responses)];
}

private static void HandleModeCommand(AgentModeProvider? modeProvider, AgentSession session, string input)
Expand Down
115 changes: 62 additions & 53 deletions dotnet/samples/02-agents/Harness/Harness_Step01_Research/Program.cs
Original file line number Diff line number Diff line change
Expand Up @@ -29,31 +29,6 @@
const int MaxContextWindowTokens = 1_050_000;
const int MaxOutputTokens = 128_000;

// Create a compaction strategy based on the model's context window.
// gpt-5.4: 1,050,000 token context window, 128,000 max output tokens.
// Defaults: tool result eviction at 50% of input budget, truncation at 80%.
var compactionStrategy = new ContextWindowCompactionStrategy(
maxContextWindowTokens: MaxContextWindowTokens,
maxOutputTokens: MaxOutputTokens);

// Create an OpenAIClient that communicates with the Foundry responses service and get an IChatClient with stored output disabled
// so that chat history is managed locally by the agent framework.
// WARNING: DefaultAzureCredential is convenient for development but requires careful consideration in production.
// In production, consider using a specific credential (e.g., ManagedIdentityCredential) to avoid
// latency issues, unintended credential probing, and potential security risks from fallback mechanisms.
OpenAIClientOptions clientOptions = new() { Endpoint = new Uri(endpoint), RetryPolicy = new ClientRetryPolicy(3) };
IChatClient chatClient = new OpenAIClient(new BearerTokenPolicy(new DefaultAzureCredential(), "https://ai.azure.com/.default"), clientOptions)
.GetResponsesClient()
.AsIChatClientWithStoredOutputDisabled(deploymentName)
.AsBuilder()
.UseFunctionInvocation()
.UsePerServiceCallChatHistoryPersistence()
.UseAIContextProviders(new CompactionProvider(compactionStrategy))
.Build();

// Create web browsing tools for downloading and converting HTML pages to markdown.
var webBrowsingTools = new WebBrowsingTools();

// Create a ChatClientAgent with the Harness providers (TodoProvider and AgentModeProvider)
// and research-focused instructions including the mandatory planning workflow.
var instructions =
Expand Down Expand Up @@ -123,36 +98,70 @@ Also save intermediate notes and findings as you go — this helps with long mul
When a temporary file is no longer needed, delete it to keep file memory tidy.
""";

AIAgent agent = new ChatClientAgent(
chatClient,
new ChatClientAgentOptions
{
Name = "ResearchAgent",
Description = "A research assistant that plans and executes research tasks.",
AIContextProviders =
[
new TodoProvider(),
new AgentModeProvider(),
new FileMemoryProvider(
new FileSystemAgentFileStore(Path.Combine(AppContext.BaseDirectory, "agent-files")),
(_) => new FileMemoryState() { WorkingFolder = DateTime.UtcNow.ToString("yyyyMMdd_HHmmss") + "_" + Guid.NewGuid().ToString() })
],
RequirePerServiceCallChatHistoryPersistence = true,
UseProvidedChatClientAsIs = true,
ChatHistoryProvider = new InMemoryChatHistoryProvider(new InMemoryChatHistoryProviderOptions
// Create a compaction strategy based on the model's context window.
// gpt-5.4: 1,050,000 token context window, 128,000 max output tokens.
// Defaults: tool result eviction at 50% of input budget, truncation at 80%.
var compactionStrategy = new ContextWindowCompactionStrategy(
maxContextWindowTokens: MaxContextWindowTokens,
maxOutputTokens: MaxOutputTokens);

AIAgent agent =
// Create an OpenAIClient that communicates with the Foundry responses service.
new OpenAIClient(
// WARNING: DefaultAzureCredential is convenient for development but requires careful consideration in production.
// In production, consider using a specific credential (e.g., ManagedIdentityCredential) to avoid
// latency issues, unintended credential probing, and potential security risks from fallback mechanisms.
new BearerTokenPolicy(new DefaultAzureCredential(), "https://ai.azure.com/.default"),
new OpenAIClientOptions()
{
ChatReducer = compactionStrategy.AsChatReducer(),
}),
ChatOptions = new ChatOptions
Endpoint = new Uri(endpoint),
RetryPolicy = new ClientRetryPolicy(3) // Enable retries to improve resiliency.
})
.GetResponsesClient()
.AsIChatClientWithStoredOutputDisabled(deploymentName) // We want to manage chat history locally (not stored in the responses service), so that we can manage compaction ourselves.

// Build a ChatClient Pipeline
.AsBuilder()
.UseFunctionInvocation() // We are building our own stack from scratch so we need to include Function Invocation ourselves.
.UsePerServiceCallChatHistoryPersistence() // Save chat history updates to the session after each service call, rather than only at the end of the run.
.UseAIContextProviders(new CompactionProvider(compactionStrategy)) // Add Compaction before each service call to responses so that long function invocation loops don't overflow the context.

// Build our agent on top of the ChatClient Pipeline
.BuildAIAgent(
new ChatClientAgentOptions
{
// Set a high token limit for long research tasks with many tool calls and long outputs.
// This matches gpt-5.4's max output tokens, and should be adjusted depending on the model used and expected response length.
MaxOutputTokens = 128_000,
Instructions = instructions,
Reasoning = new() { Effort = ReasoningEffort.Medium },
Tools = [ResponseTool.CreateWebSearchTool().AsAITool(), .. webBrowsingTools.Tools],
},
});
Name = "ResearchAgent",
Description = "A research assistant that plans and executes research tasks.",
UseProvidedChatClientAsIs = true, // Since we built our own stack from scratch we need to tell the agent not to also add defaults like Function Invocation.
RequirePerServiceCallChatHistoryPersistence = true, // Since we are added the per service call persistence ChatClient, we need to tell the agent to not also store chat history at the end of the run.
ChatHistoryProvider = new InMemoryChatHistoryProvider( // Store chat history in memory in the session object. Will persist if the session is persisted.
new InMemoryChatHistoryProviderOptions
{
ChatReducer = compactionStrategy.AsChatReducer(), // Run compaction on the InMemory chat history when it gets too large.
}),
AIContextProviders =
[
new TodoProvider(), // Add an AIContextProvider to allow the agent to create a TODO list, which is stored in the session.
new AgentModeProvider(), // Add an AIContextProvider that tracks the agent mode and allows switching mode. Current mode is stored in the session.
new FileMemoryProvider( // Add an AIContextProvider that can store memories in files under a session specific working folder.
new FileSystemAgentFileStore(Path.Combine(AppContext.BaseDirectory, "agent-files")),
(_) => new FileMemoryState() { WorkingFolder = DateTime.UtcNow.ToString("yyyyMMdd_HHmmss") + "_" + Guid.NewGuid().ToString() })
],
ChatOptions = new ChatOptions
{
Instructions = instructions,
Tools =
[
ResponseTool.CreateWebSearchTool().AsAITool(), // Add the foundry hosted web search tool that runs in the service.
new WebBrowsingTool(), // Add a local web browsing tool that converts html to markdown.
],
MaxOutputTokens = MaxOutputTokens, // Set a high token limit for long research tasks with many tool calls and long outputs.
Reasoning = new() { Effort = ReasoningEffort.Medium },
},
})
.AsBuilder()
.UseToolApproval() // Add the ability to auto approve tools once a user has said they don't want to be asked again. Approval rules are tied to the session.
.Build();

// Run the interactive console session using the shared HarnessConsole helper.
await HarnessConsole.RunAgentAsync(agent, title: "Research Assistant", userPrompt: "Enter a research topic to get started.", maxContextWindowTokens: MaxContextWindowTokens, maxOutputTokens: MaxOutputTokens);
Original file line number Diff line number Diff line change
Expand Up @@ -2,25 +2,34 @@

using System.ComponentModel;
using System.Net;
using System.Text.Json;
using System.Text.RegularExpressions;
using Microsoft.Extensions.AI;

namespace SampleApp;

/// <summary>
/// Provides a web browsing tool that downloads HTML pages and converts them to markdown.
/// An AI function that downloads HTML pages and converts them to markdown.
/// </summary>
internal sealed partial class WebBrowsingTools
internal sealed partial class WebBrowsingTool : AIFunction
{
private static readonly HttpClient s_httpClient = new();
private readonly AIFunction _inner = AIFunctionFactory.Create(DownloadUriAsync);

/// <summary>
/// Gets the web browsing tools.
/// </summary>
public IList<AITool> Tools { get; } =
[
AIFunctionFactory.Create(DownloadUriAsync),
];
/// <inheritdoc/>
public override string Name
{
    get => this._inner.Name;
}

/// <inheritdoc/>
public override string Description
{
    get => this._inner.Description;
}

/// <inheritdoc/>
public override JsonElement JsonSchema
{
    get => this._inner.JsonSchema;
}

/// <inheritdoc/>
protected override ValueTask<object?> InvokeCoreAsync(AIFunctionArguments arguments, CancellationToken cancellationToken)
{
    // Delegate to the AIFunctionFactory-created wrapper, which performs
    // argument binding and invokes the underlying implementation.
    return this._inner.InvokeAsync(arguments, cancellationToken);
}

[Description("Download the html from the given url as markdown")]
private static async Task<string> DownloadUriAsync(
Expand Down
5 changes: 5 additions & 0 deletions dotnet/src/Microsoft.Agents.AI/AgentJsonUtilities.cs
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,11 @@ private static JsonSerializerOptions CreateDefaultOptions()
// AgentModeProvider types
[JsonSerializable(typeof(AgentModeState))]

// ToolApprovalAgent types
[JsonSerializable(typeof(ToolApprovalState))]
[JsonSerializable(typeof(ToolApprovalRule))]
[JsonSerializable(typeof(List<ToolApprovalRule>), TypeInfoPropertyName = "ToolApprovalRuleList")]

// FileMemoryProvider types
[JsonSerializable(typeof(FileMemoryState))]
[JsonSerializable(typeof(FileSearchResult))]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ public override async IAsyncEnumerable<ChatResponseUpdate> GetStreamingResponseA
while (hasUpdates)
{
var update = enumerator.Current;
responseUpdates.Add(update);
responseUpdates.Add(update.Clone());

// If the service returned a real ConversationId on any update, remember that.
// Otherwise stamp our sentinel so FICC treats this as service-managed —
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
// Copyright (c) Microsoft. All rights reserved.

using System.Diagnostics.CodeAnalysis;
using Microsoft.Extensions.AI;
using Microsoft.Shared.DiagnosticIds;
using Microsoft.Shared.Diagnostics;

namespace Microsoft.Agents.AI;

/// <summary>
/// A <see cref="ToolApprovalResponseContent"/> wrapper that carries "always approve" settings,
/// allowing the <see cref="ToolApprovalAgent"/> middleware to record standing approval rules so
/// that matching future tool calls are approved automatically without user interaction.
/// </summary>
/// <remarks>
/// <para>
/// Do not construct this type directly. Use the
/// <see cref="ToolApprovalRequestContentExtensions.CreateAlwaysApproveToolResponse"/> or
/// <see cref="ToolApprovalRequestContentExtensions.CreateAlwaysApproveToolWithArgumentsResponse"/>
/// extension methods on <see cref="ToolApprovalRequestContent"/>, which set the appropriate flags.
/// </para>
/// <para>
/// The <see cref="ToolApprovalAgent"/> middleware unwraps <see cref="InnerResponse"/> to forward it
/// to the inner agent, while persisting the approval settings as <see cref="ToolApprovalRule"/>
/// entries in the session state.
/// </para>
/// </remarks>
[Experimental(DiagnosticIds.Experiments.AgentsAIExperiments)]
public sealed class AlwaysApproveToolApprovalResponseContent : AIContent
{
    /// <summary>
    /// Gets the wrapped <see cref="ToolApprovalResponseContent"/> that is forwarded to the inner agent.
    /// </summary>
    public ToolApprovalResponseContent InnerResponse { get; }

    /// <summary>
    /// Gets a value indicating whether every future call to the same tool should be auto-approved,
    /// regardless of the arguments supplied.
    /// </summary>
    public bool AlwaysApproveTool { get; }

    /// <summary>
    /// Gets a value indicating whether future calls to the same tool with exactly the same
    /// arguments should be auto-approved.
    /// </summary>
    public bool AlwaysApproveToolWithArguments { get; }

    /// <summary>
    /// Initializes a new instance of the <see cref="AlwaysApproveToolApprovalResponseContent"/> class.
    /// </summary>
    /// <param name="innerResponse">The underlying approval response to forward to the agent.</param>
    /// <param name="alwaysApproveTool">
    /// When <see langword="true"/>, all future calls to this tool type will be auto-approved.
    /// </param>
    /// <param name="alwaysApproveToolWithArguments">
    /// When <see langword="true"/>, all future calls to this tool type with the same arguments will be auto-approved.
    /// </param>
    internal AlwaysApproveToolApprovalResponseContent(
        ToolApprovalResponseContent innerResponse,
        bool alwaysApproveTool,
        bool alwaysApproveToolWithArguments)
    {
        this.InnerResponse = Throw.IfNull(innerResponse);
        this.AlwaysApproveTool = alwaysApproveTool;
        this.AlwaysApproveToolWithArguments = alwaysApproveToolWithArguments;
    }
}
Loading
Loading