Skip to content

Commit

Permalink
.Net: Sample Code Showcasing Usage of Reasoning Models in OpenAI and …
Browse files Browse the repository at this point in the history
…AzureOpenAI (#10558)

### Motivation and Context
1. Required: showing usage of reasoning effort.
2. Problem: Controlling reasoning effort — no existing sample demonstrated it.
3. Scenario: Using reasoning effort to benefit from the new amazing
models :)

### Description
This pull request adds sample code that demonstrates how to leverage
reasoning models in a ChatCompletion on AzureOpenAI and OpenAI.

This implementation shows how to leverage LLM reasoning capabilities and also
complements the phenomenal code from Roger Barreto (@RogerBarreto),
further strengthening the overall Semantic Kernel solution.

### Contribution Checklist

<!-- Before submitting this PR, please make sure: -->

- [ ] The code builds clean without any errors or warnings
- [ ] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [ ] All unit tests pass, and I have added new tests where possible
- [ ] I didn't break anyone 😄

---------

Co-authored-by: Roger Barreto <19890735+RogerBarreto@users.noreply.github.com>
  • Loading branch information
joslat and RogerBarreto authored Feb 28, 2025
1 parent fd27470 commit 4fdaf67
Showing 4 changed files with 216 additions and 39 deletions.
Original file line number Diff line number Diff line change
@@ -11,9 +11,14 @@ namespace ChatCompletion;
// The following example shows how to use Semantic Kernel with Azure OpenAI API
public class AzureOpenAI_ChatCompletion(ITestOutputHelper output) : BaseTest(output)
{
/// <summary>
/// Sample showing how to use <see cref="Kernel"/> with chat completion and chat prompt syntax.
/// </summary>
[Fact]
public async Task ChatPromptAsync()
{
Console.WriteLine("======== Azure Open AI - Chat Completion ========");

Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName);
Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint);

@@ -39,8 +44,8 @@ public async Task ChatPromptAsync()
apiKey: TestConfiguration.AzureOpenAI.ApiKey,
modelId: TestConfiguration.AzureOpenAI.ChatModelId);
}
var kernel = kernelBuilder.Build();

var kernel = kernelBuilder.Build();
var reply = await kernel.InvokePromptAsync(chatPrompt.ToString());

chatPrompt.AppendLine($"<message role=\"assistant\"><![CDATA[{reply}]]></message>");
@@ -51,54 +56,30 @@ public async Task ChatPromptAsync()
Console.WriteLine(reply);
}

/// <summary>
/// Sample showing how to use <see cref="IChatCompletionService"/> directly with a <see cref="ChatHistory"/>.
/// </summary>
[Fact]
public async Task ServicePromptAsync()
{
Console.WriteLine("======== Azure Open AI - Chat Completion ========");

Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName);
Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint);

AzureOpenAIChatCompletionService chatCompletionService =
string.IsNullOrEmpty(TestConfiguration.AzureOpenAI.ApiKey) ?
new(
string.IsNullOrEmpty(TestConfiguration.AzureOpenAI.ApiKey)
? new(
deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
endpoint: TestConfiguration.AzureOpenAI.Endpoint,
credentials: new DefaultAzureCredential(),
modelId: TestConfiguration.AzureOpenAI.ChatModelId) :
new(
deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
endpoint: TestConfiguration.AzureOpenAI.Endpoint,
apiKey: TestConfiguration.AzureOpenAI.ApiKey,
modelId: TestConfiguration.AzureOpenAI.ChatModelId);

await StartChatAsync(chatCompletionService);
}

/// <summary>
/// Sample showing how to use Azure Open AI Chat Completion with Azure Default Credential.
/// If local auth is disabled in the Azure Open AI deployment, you can use Azure Default Credential to authenticate.
/// </summary>
[Fact]
public async Task DefaultAzureCredentialSampleAsync()
{
Console.WriteLine("======== Azure Open AI - Chat Completion with Azure Default Credential ========");

AzureOpenAIChatCompletionService chatCompletionService =
string.IsNullOrEmpty(TestConfiguration.AzureOpenAI.ApiKey) ?
new(
modelId: TestConfiguration.AzureOpenAI.ChatModelId)
: new(
deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
endpoint: TestConfiguration.AzureOpenAI.Endpoint,
credentials: new DefaultAzureCredential(),
modelId: TestConfiguration.AzureOpenAI.ChatModelId) :
new(
deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
endpoint: TestConfiguration.AzureOpenAI.Endpoint,
apiKey: TestConfiguration.AzureOpenAI.ApiKey,
modelId: TestConfiguration.AzureOpenAI.ChatModelId);

await StartChatAsync(chatCompletionService);
}
apiKey: TestConfiguration.AzureOpenAI.ApiKey,
modelId: TestConfiguration.AzureOpenAI.ChatModelId);

private async Task StartChatAsync(IChatCompletionService chatGPT)
{
Console.WriteLine("Chat content:");
Console.WriteLine("------------------------");

@@ -109,7 +90,7 @@ private async Task StartChatAsync(IChatCompletionService chatGPT)
OutputLastMessage(chatHistory);

// First assistant message
var reply = await chatGPT.GetChatMessageContentAsync(chatHistory);
var reply = await chatCompletionService.GetChatMessageContentAsync(chatHistory);
chatHistory.Add(reply);
OutputLastMessage(chatHistory);

@@ -118,7 +99,7 @@ private async Task StartChatAsync(IChatCompletionService chatGPT)
OutputLastMessage(chatHistory);

// Second assistant message
reply = await chatGPT.GetChatMessageContentAsync(chatHistory);
reply = await chatCompletionService.GetChatMessageContentAsync(chatHistory);
chatHistory.Add(reply);
OutputLastMessage(chatHistory);
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
// Copyright (c) Microsoft. All rights reserved.

using System.Text;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Connectors.AzureOpenAI;
using OpenAI.Chat;

namespace ChatCompletion;

// The following example shows how to use Semantic Kernel with Azure OpenAI API
public class AzureOpenAI_ChatCompletion_WithReasoning(ITestOutputHelper output) : BaseTest(output)
{
    /// <summary>
    /// Sample showing how to use <see cref="Kernel"/> with chat completion and chat prompt syntax
    /// while controlling the model's reasoning effort via execution settings.
    /// </summary>
    [Fact]
    public async Task ChatPromptWithReasoningAsync()
    {
        Console.WriteLine("======== Azure Open AI - Chat Completion with Reasoning ========");

        Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName);
        Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint);
        Assert.NotNull(TestConfiguration.AzureOpenAI.ApiKey);

        var kernel = Kernel.CreateBuilder()
            .AddAzureOpenAIChatCompletion(
                deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
                endpoint: TestConfiguration.AzureOpenAI.Endpoint,
                apiKey: TestConfiguration.AzureOpenAI.ApiKey,
                modelId: TestConfiguration.AzureOpenAI.ChatModelId)
            .Build();

        // Create execution settings with low reasoning effort.
        var executionSettings = new AzureOpenAIPromptExecutionSettings
        {
            // Flags the Azure SDK to send the newer max_completion_tokens property,
            // which reasoning models require instead of the legacy max_tokens.
            SetNewMaxCompletionTokensEnabled = true,
            MaxTokens = 2000,
            // Note: reasoning effort is only available for reasoning models (at this moment o3-mini & o1 models)
            ReasoningEffort = ChatReasoningEffortLevel.Low
        };

        // Create KernelArguments using the execution settings.
        var kernelArgs = new KernelArguments(executionSettings);

        StringBuilder chatPrompt = new("""
            <message role="developer">You are an expert software engineer, specialized in the Semantic Kernel SDK and NET framework</message>
            <message role="user">Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop .</message>
            """);

        // Invoke the prompt with the reasoning-enabled execution settings.
        var reply = await kernel.InvokePromptAsync(chatPrompt.ToString(), kernelArgs);

        Console.WriteLine(reply);
    }

    /// <summary>
    /// Sample showing how to use <see cref="IChatCompletionService"/> directly with a <see cref="ChatHistory"/>
    /// while controlling the model's reasoning effort via execution settings.
    /// </summary>
    [Fact]
    public async Task ServicePromptWithReasoningAsync()
    {
        Console.WriteLine("======== Azure Open AI - Chat Completion with Reasoning ========");

        Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName);
        Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint);
        Assert.NotNull(TestConfiguration.AzureOpenAI.ApiKey);

        IChatCompletionService chatCompletionService = new AzureOpenAIChatCompletionService(
            deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName,
            endpoint: TestConfiguration.AzureOpenAI.Endpoint,
            apiKey: TestConfiguration.AzureOpenAI.ApiKey,
            modelId: TestConfiguration.AzureOpenAI.ChatModelId);

        // Create execution settings with low reasoning effort.
        var executionSettings = new AzureOpenAIPromptExecutionSettings
        {
            // Flags the Azure SDK to send the newer max_completion_tokens property,
            // which reasoning models require instead of the legacy max_tokens.
            SetNewMaxCompletionTokensEnabled = true,
            MaxTokens = 2000,
            // Note: reasoning effort is only available for reasoning models (at this moment o3-mini & o1 models)
            ReasoningEffort = ChatReasoningEffortLevel.Low
        };

        // Create a ChatHistory and add messages.
        var chatHistory = new ChatHistory();
        chatHistory.AddDeveloperMessage(
            "You are an expert software engineer, specialized in the Semantic Kernel SDK and .NET framework.");
        chatHistory.AddUserMessage(
            "Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop.");

        // Instead of a prompt string, call GetChatMessageContentAsync with the chat history.
        var reply = await chatCompletionService.GetChatMessageContentAsync(
            chatHistory: chatHistory,
            executionSettings: executionSettings);

        Console.WriteLine(reply);
    }
}
10 changes: 10 additions & 0 deletions dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs
Original file line number Diff line number Diff line change
@@ -10,6 +10,9 @@ namespace ChatCompletion;
// The following example shows how to use Semantic Kernel with OpenAI API
public class OpenAI_ChatCompletion(ITestOutputHelper output) : BaseTest(output)
{
/// <summary>
/// Sample showing how to use <see cref="IChatCompletionService"/> directly with a <see cref="ChatHistory"/>.
/// </summary>
[Fact]
public async Task ServicePromptAsync()
{
@@ -23,6 +26,10 @@ public async Task ServicePromptAsync()
await StartChatAsync(chatCompletionService);
}

/// <summary>
/// Sample showing how to use <see cref="IChatCompletionService"/> directly with a <see cref="ChatHistory"/> also exploring the
/// breaking glass approach capturing the underlying <see cref="OpenAI.Chat.ChatCompletion"/> instance via <see cref="KernelContent.InnerContent"/>.
/// </summary>
[Fact]
public async Task ServicePromptWithInnerContentAsync()
{
@@ -51,6 +58,9 @@ public async Task ServicePromptWithInnerContentAsync()
OutputInnerContent(replyInnerContent!);
}

/// <summary>
/// Sample showing how to use <see cref="Kernel"/> with chat completion and chat prompt syntax.
/// </summary>
[Fact]
public async Task ChatPromptAsync()
{
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
// Copyright (c) Microsoft. All rights reserved.

using System.Text;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Connectors.OpenAI;
using OpenAI.Chat;

namespace ChatCompletion;

// The following example shows how to use Semantic Kernel with OpenAI API
public class OpenAI_ChatCompletion_WithReasoning(ITestOutputHelper output) : BaseTest(output)
{
    /// <summary>
    /// Sample showing how to use <see cref="Kernel"/> with chat completion and chat prompt syntax
    /// while controlling the model's reasoning effort via execution settings.
    /// </summary>
    [Fact]
    public async Task ChatPromptWithReasoningAsync()
    {
        Console.WriteLine("======== Open AI - Chat Completion with Reasoning ========");

        Assert.NotNull(TestConfiguration.OpenAI.ChatModelId);
        Assert.NotNull(TestConfiguration.OpenAI.ApiKey);

        var kernel = Kernel.CreateBuilder()
            .AddOpenAIChatCompletion(
                modelId: TestConfiguration.OpenAI.ChatModelId,
                apiKey: TestConfiguration.OpenAI.ApiKey)
            .Build();

        // Create execution settings with low reasoning effort.
        var executionSettings = new OpenAIPromptExecutionSettings
        {
            MaxTokens = 2000,
            ReasoningEffort = ChatReasoningEffortLevel.Low // Only available for reasoning models (i.e: o3-mini, o1, ...)
        };

        // Create KernelArguments using the execution settings.
        var kernelArgs = new KernelArguments(executionSettings);

        StringBuilder chatPrompt = new("""
            <message role="developer">You are an expert software engineer, specialized in the Semantic Kernel SDK and NET framework</message>
            <message role="user">Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop .</message>
            """);

        // Invoke the prompt with the reasoning-enabled execution settings.
        var reply = await kernel.InvokePromptAsync(chatPrompt.ToString(), kernelArgs);

        Console.WriteLine(reply);
    }

    /// <summary>
    /// Sample showing how to use <see cref="IChatCompletionService"/> directly with a <see cref="ChatHistory"/>
    /// while controlling the model's reasoning effort via execution settings.
    /// </summary>
    [Fact]
    public async Task ServicePromptWithReasoningAsync()
    {
        Assert.NotNull(TestConfiguration.OpenAI.ChatModelId);
        Assert.NotNull(TestConfiguration.OpenAI.ApiKey);

        Console.WriteLine("======== Open AI - Chat Completion with Reasoning ========");

        OpenAIChatCompletionService chatCompletionService = new(TestConfiguration.OpenAI.ChatModelId, TestConfiguration.OpenAI.ApiKey);

        // Create execution settings with low reasoning effort.
        var executionSettings = new OpenAIPromptExecutionSettings
        {
            MaxTokens = 2000,
            ReasoningEffort = ChatReasoningEffortLevel.Low // Only available for reasoning models (i.e: o3-mini, o1, ...)
        };

        // Create a ChatHistory and add messages.
        var chatHistory = new ChatHistory();
        chatHistory.AddDeveloperMessage(
            "You are an expert software engineer, specialized in the Semantic Kernel SDK and .NET framework.");
        chatHistory.AddUserMessage(
            "Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop.");

        // Instead of a prompt string, call GetChatMessageContentAsync with the chat history.
        var reply = await chatCompletionService.GetChatMessageContentAsync(
            chatHistory: chatHistory,
            executionSettings: executionSettings);

        Console.WriteLine(reply);
    }
}

0 comments on commit 4fdaf67

Please sign in to comment.