Skip to content

Commit

Permalink
.Net: Hybrid model orchestration sample (#10503)
Browse files Browse the repository at this point in the history
### Motivation, Context and Description
This PR adds a sample demonstrating one of the many possible
implementations of the hybrid model orchestration pattern: fallback
orchestration
  • Loading branch information
SergeyMenshykh authored Feb 12, 2025
1 parent 87d5a50 commit 88fbaf8
Showing 4 changed files with 268 additions and 0 deletions.
1 change: 1 addition & 0 deletions dotnet/Directory.Packages.props
Original file line number Diff line number Diff line change
@@ -71,6 +71,7 @@
<PackageVersion Include="Microsoft.Extensions.AI" Version="9.1.0-preview.1.25064.3" />
<PackageVersion Include="Microsoft.Extensions.AI.Abstractions" Version="9.1.0-preview.1.25064.3" />
<PackageVersion Include="Microsoft.Extensions.AI.AzureAIInference" Version="9.1.0-preview.1.25064.3" />
<PackageVersion Include="Microsoft.Extensions.AI.OpenAI" Version="9.1.0-preview.1.25064.3" />
<PackageVersion Include="Microsoft.Extensions.Configuration" Version="8.0.0" />
<PackageVersion Include="Microsoft.Extensions.Configuration.Abstractions" Version="8.0.0" />
<PackageVersion Include="Microsoft.Extensions.Configuration.Binder" Version="8.0.2" />
264 changes: 264 additions & 0 deletions dotnet/samples/Concepts/ChatCompletion/HybridCompletion_Fallback.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,264 @@
// Copyright (c) Microsoft. All rights reserved.

using System.ClientModel;
using System.ClientModel.Primitives;
using System.ComponentModel;
using System.Net;
using System.Runtime.CompilerServices;
using Azure.AI.OpenAI;
using Azure.Identity;
using Microsoft.Extensions.AI;
using Microsoft.SemanticKernel;

namespace ChatCompletion;

/// <summary>
/// This example demonstrates how an AI application can use code to attempt inference with the first available chat client in the list, falling back to the next client if the previous one fails.
/// The <see cref="FallbackChatClient"/> class handles all the fallback complexities, abstracting them away from the application code.
/// Since the <see cref="FallbackChatClient"/> class implements the <see cref="IChatClient"/> interface, the chat client the application uses for inference can be easily replaced with the <see cref="FallbackChatClient"/>.
/// </summary>
/// <remarks>
/// The <see cref="FallbackChatClient"/> class is useful when an application utilizes multiple models and needs to switch between them based on the situation.
/// For example, the application may use a cloud-based model by default and seamlessly fall back to a local model when the cloud model is unavailable (e.g., in offline mode), and vice versa.
/// Additionally, the application can enhance resilience by employing several cloud models, falling back to the next one if the previous model fails.
/// </remarks>
public class HybridCompletion_Fallback(ITestOutputHelper output) : BaseTest(output)
{
    /// <summary>
    /// Demonstrates non-streaming completion via the <see cref="FallbackChatClient"/>: the first client
    /// always fails with 503 Service Unavailable, so the request is served by the second client.
    /// </summary>
    [Fact]
    public async Task FallbackToAvailableModelAsync()
    {
        // A client rigged to fail every request with 503 Service Unavailable.
        IChatClient failingClient = CreateUnavailableOpenAIChatClient();

        // A working Azure OpenAI client to fall back to.
        IChatClient workingClient = CreateAzureOpenAIChatClient();

        // Order matters: the fallback client tries clients left to right.
        IChatClient fallbackChatClient = new FallbackChatClient([failingClient, workingClient]);

        ChatOptions chatOptions = new() { Tools = [AIFunctionFactory.Create(GetWeather, new AIFunctionFactoryCreateOptions { Name = "GetWeather" })] };

        var result = await fallbackChatClient.CompleteAsync("Do I need an umbrella?", chatOptions);

        Output.WriteLine(result);

        [Description("Gets the weather")]
        string GetWeather() => "It's sunny";
    }

    /// <summary>
    /// Demonstrates streaming completion via the <see cref="FallbackChatClient"/>: the first client
    /// always fails with 503 Service Unavailable, so the stream is produced by the second client.
    /// </summary>
    [Fact]
    public async Task FallbackToAvailableModelStreamingAsync()
    {
        // A client rigged to fail every request with 503 Service Unavailable.
        IChatClient failingClient = CreateUnavailableOpenAIChatClient();

        // A working Azure OpenAI client to fall back to.
        IChatClient workingClient = CreateAzureOpenAIChatClient();

        // Order matters: the fallback client tries clients left to right.
        IChatClient fallbackChatClient = new FallbackChatClient([failingClient, workingClient]);

        ChatOptions chatOptions = new() { Tools = [AIFunctionFactory.Create(GetWeather, new AIFunctionFactoryCreateOptions { Name = "GetWeather" })] };

        var result = fallbackChatClient.CompleteStreamingAsync("Do I need an umbrella?", chatOptions);

        await foreach (var update in result)
        {
            Output.WriteLine(update);
        }

        [Description("Gets the weather")]
        string GetWeather() => "It's sunny";
    }

    /// <summary>
    /// Builds an Azure OpenAI chat client whose transport rewrites every response
    /// to 503 Service Unavailable, simulating an unreachable service.
    /// </summary>
    private static IChatClient CreateUnavailableOpenAIChatClient()
    {
        // Wrap the real handler so each response's status code is forced to 503.
        var forced503Handler = new StubHandler(
            new HttpClientHandler(),
            (response) => { response.StatusCode = HttpStatusCode.ServiceUnavailable; return Task.CompletedTask; });

        AzureOpenAIClientOptions options = new()
        {
            Transport = new HttpClientPipelineTransport(new HttpClient(forced503Handler))
        };

        IChatClient openAiClient = new AzureOpenAIClient(new Uri(TestConfiguration.AzureOpenAI.Endpoint), new AzureCliCredential(), options).AsChatClient(TestConfiguration.AzureOpenAI.ChatDeploymentName);

        return new ChatClientBuilder(openAiClient)
            .UseFunctionInvocation()
            .Build();
    }

    /// <summary>
    /// Builds a normally functioning Azure OpenAI chat client with function invocation enabled.
    /// </summary>
    private static IChatClient CreateAzureOpenAIChatClient()
    {
        IChatClient chatClient = new AzureOpenAIClient(new Uri(TestConfiguration.AzureOpenAI.Endpoint), new AzureCliCredential()).AsChatClient(TestConfiguration.AzureOpenAI.ChatDeploymentName);

        return new ChatClientBuilder(chatClient)
            .UseFunctionInvocation()
            .Build();
    }

    /// <summary>
    /// A delegating HTTP handler that lets the request through and then applies a
    /// caller-supplied mutation to the response (here: forcing an error status code).
    /// </summary>
    protected sealed class StubHandler(HttpMessageHandler innerHandler, Func<HttpResponseMessage, Task> handler) : DelegatingHandler(innerHandler)
    {
        protected override async Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
        {
            HttpResponseMessage response = await base.SendAsync(request, cancellationToken);

            // Apply the caller's post-processing (e.g., overriding the status code).
            await handler(response);

            return response;
        }
    }
}

/// <summary>
/// Represents a chat client that performs inference using the first available chat client in the list, falling back to the next one if the previous client fails.
/// </summary>
internal sealed class FallbackChatClient : IChatClient
{
    private readonly IList<IChatClient> _chatClients;

    // Transient server-side failures that justify trying the next client
    // when the caller has not supplied FallbackStatusCodes.
    private static readonly List<HttpStatusCode> s_defaultFallbackStatusCodes = new()
    {
        HttpStatusCode.InternalServerError,
        HttpStatusCode.NotImplemented,
        HttpStatusCode.BadGateway,
        HttpStatusCode.ServiceUnavailable,
        HttpStatusCode.GatewayTimeout
    };

    /// <summary>
    /// Initializes a new instance of the <see cref="FallbackChatClient"/> class.
    /// </summary>
    /// <param name="chatClients">The chat clients to fallback to, tried in order.</param>
    /// <exception cref="ArgumentException">Thrown when <paramref name="chatClients"/> is null or empty.</exception>
    public FallbackChatClient(IList<IChatClient> chatClients)
    {
        // Use the Count property instead of LINQ's Any() — no enumerator allocation needed on an IList.
        this._chatClients = chatClients is { Count: > 0 } ? chatClients : throw new ArgumentException("At least one chat client must be provided.", nameof(chatClients));
    }

    /// <summary>
    /// Gets or sets the HTTP status codes that will trigger the fallback to the next chat client.
    /// When null, <see cref="s_defaultFallbackStatusCodes"/> is used.
    /// </summary>
    public List<HttpStatusCode>? FallbackStatusCodes { get; set; }

    /// <inheritdoc/>
    // Cached: a property expression body (`=> new()`) would allocate a new instance on every access.
    public ChatClientMetadata Metadata { get; } = new();

    /// <inheritdoc/>
    public async Task<Microsoft.Extensions.AI.ChatCompletion> CompleteAsync(IList<ChatMessage> chatMessages, ChatOptions? options = null, CancellationToken cancellationToken = default)
    {
        for (int i = 0; i < this._chatClients.Count; i++)
        {
            // The IList indexer is O(1); ElementAt(i) would walk the list through LINQ.
            var chatClient = this._chatClients[i];

            try
            {
                return await chatClient.CompleteAsync(chatMessages, options, cancellationToken).ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                if (this.ShouldFallbackToNextClient(ex, i, this._chatClients.Count))
                {
                    continue;
                }

                // Preserve the original stack trace for non-fallback failures.
                throw;
            }
        }

        // All clients failed with a fallback-eligible error.
        throw new InvalidOperationException("Neither of the chat clients could complete the inference.");
    }

    /// <inheritdoc/>
    public async IAsyncEnumerable<StreamingChatCompletionUpdate> CompleteStreamingAsync(IList<ChatMessage> chatMessages, ChatOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
    {
        for (int i = 0; i < this._chatClients.Count; i++)
        {
            // The IList indexer is O(1); ElementAt(i) would walk the list through LINQ.
            var chatClient = this._chatClients[i];

            IAsyncEnumerable<StreamingChatCompletionUpdate> completionStream = chatClient.CompleteStreamingAsync(chatMessages, options, cancellationToken);

        // A manual enumerator is required here: the fallback decision must be made on the
        // first MoveNextAsync (where connection errors surface), which `await foreach` cannot express.
        ConfiguredCancelableAsyncEnumerable<StreamingChatCompletionUpdate>.Enumerator enumerator = completionStream.ConfigureAwait(false).GetAsyncEnumerator();

            try
            {
                try
                {
                    // Move to the first update to reveal any exceptions.
                    if (!await enumerator.MoveNextAsync())
                    {
                        yield break;
                    }
                }
                catch (Exception ex)
                {
                    if (this.ShouldFallbackToNextClient(ex, i, this._chatClients.Count))
                    {
                        // `continue` still runs the finally block, disposing this enumerator first.
                        continue;
                    }

                    throw;
                }

                // Yield the first update.
                yield return enumerator.Current;

                // Yield the rest of the updates.
                while (await enumerator.MoveNextAsync())
                {
                    yield return enumerator.Current;
                }

                // The stream has ended so break the while loop.
                break;
            }
            finally
            {
                await enumerator.DisposeAsync();
            }
        }
    }

    /// <summary>
    /// Decides whether the failure of client <paramref name="clientIndex"/> should trigger a fallback to the next client.
    /// </summary>
    /// <param name="ex">The exception thrown by the failing client.</param>
    /// <param name="clientIndex">The zero-based index of the failing client.</param>
    /// <param name="numberOfClients">The total number of configured clients.</param>
    /// <returns><c>true</c> to try the next client; <c>false</c> to let the exception propagate.</returns>
    /// <exception cref="InvalidOperationException">Thrown when the exception type is unsupported or carries no HTTP status code.</exception>
    private bool ShouldFallbackToNextClient(Exception ex, int clientIndex, int numberOfClients)
    {
        // If the exception is thrown by the last client then don't fallback.
        if (clientIndex == numberOfClients - 1)
        {
            return false;
        }

        HttpStatusCode? statusCode = ex switch
        {
            HttpOperationException operationException => operationException.StatusCode,
            HttpRequestException httpRequestException => httpRequestException.StatusCode,
            ClientResultException clientResultException => (HttpStatusCode?)clientResultException.Status,
            // Wrap with the original exception as InnerException so the root cause is not lost.
            _ => throw new InvalidOperationException($"Unsupported exception type: {ex.GetType()}.", ex),
        };

        if (statusCode is null)
        {
            // Same here: keep the original failure attached for diagnostics.
            throw new InvalidOperationException("The exception does not contain an HTTP status code.", ex);
        }

        // The `is null` check above already guarantees a value; no null-forgiving operator needed.
        return (this.FallbackStatusCodes ?? s_defaultFallbackStatusCodes).Contains(statusCode.Value);
    }

    /// <inheritdoc/>
    public void Dispose()
    {
        // We don't own the chat clients so we don't dispose them.
    }

    /// <inheritdoc/>
    public object? GetService(Type serviceType, object? serviceKey = null)
    {
        return null;
    }
}
2 changes: 2 additions & 0 deletions dotnet/samples/Concepts/Concepts.csproj
Original file line number Diff line number Diff line change
@@ -15,9 +15,11 @@

<ItemGroup>
<PackageReference Include="Docker.DotNet" />
<PackageReference Include="Microsoft.Extensions.AI.OpenAI" />
<PackageReference Include="Microsoft.ML.Tokenizers.Data.Cl100kBase" />
<PackageReference Include="Microsoft.NET.Test.Sdk" />
<PackageReference Include="Npgsql" />
<PackageReference Include="OpenAI" />
<PackageReference Include="xRetry" />
<PackageReference Include="xunit" />
<PackageReference Include="xunit.abstractions" />
1 change: 1 addition & 0 deletions dotnet/samples/Concepts/README.md
Original file line number Diff line number Diff line change
@@ -68,6 +68,7 @@ dotnet test -l "console;verbosity=detailed" --filter "FullyQualifiedName=ChatCom
- [Google_GeminiChatCompletionStreaming](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/Google_GeminiChatCompletionStreaming.cs)
- [Google_GeminiGetModelResult](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/Google_GeminiGetModelResult.cs)
- [Google_GeminiVision](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/Google_GeminiVision.cs)
- [HybridCompletion_Fallback](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/HybridCompletion_Fallback.cs)
- [OpenAI_ChatCompletion](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs)
- [OpenAI_ChatCompletionStreaming](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionStreaming.cs)
- [OpenAI_ChatCompletionWithVision](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionWithVision.cs)

0 comments on commit 88fbaf8

Please sign in to comment.