From 4fdaf67b5a6314311676bea2a36a3b3dc2c08662 Mon Sep 17 00:00:00 2001 From: Jose Luis Latorre Millas Date: Fri, 28 Feb 2025 12:28:51 +0100 Subject: [PATCH] .Net: Sample Code Showcasing Usage of Reasoning Models in OpenAI and AzureOpenAI (#10558) ### Motivation and Context 1. Required: showing usage of reasoning effort. 2. Problem: Controlling reasoning effort - no sample 3. Scenario: Using reasoning effort to benefit from the new amazing models :) ### Description This pull request adds sample code that demonstrates how to leverage reasoning models in a ChatCompletion on AzureOpenAI and OpenAI. This implementation shows how to leverage LLM Reasoning capabilities but also complements the phenomenal code from Roger Barreto (@RogerBarreto), further strengthening the overall Semantic Kernel solution. ### Contribution Checklist - [ ] The code builds clean without any errors or warnings - [ ] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations - [ ] All unit tests pass, and I have added new tests where possible - [ ] I didn't break anyone :smile: --------- Co-authored-by: Roger Barreto <19890735+RogerBarreto@users.noreply.github.com> --- .../AzureOpenAI_ChatCompletion.cs | 59 ++++------- ...zureOpenAI_ChatCompletion_WithReasoning.cs | 100 ++++++++++++++++++ .../ChatCompletion/OpenAI_ChatCompletion.cs | 10 ++ .../OpenAI_ChatCompletion_WithReasoning.cs | 86 +++++++++++++++ 4 files changed, 216 insertions(+), 39 deletions(-) create mode 100644 dotnet/samples/Concepts/ChatCompletion/AzureOpenAI_ChatCompletion_WithReasoning.cs create mode 100644 dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion_WithReasoning.cs diff --git a/dotnet/samples/Concepts/ChatCompletion/AzureOpenAI_ChatCompletion.cs 
b/dotnet/samples/Concepts/ChatCompletion/AzureOpenAI_ChatCompletion.cs index 07982751ff0f..e0bc277b9f2d 100644 --- a/dotnet/samples/Concepts/ChatCompletion/AzureOpenAI_ChatCompletion.cs +++ b/dotnet/samples/Concepts/ChatCompletion/AzureOpenAI_ChatCompletion.cs @@ -11,9 +11,14 @@ namespace ChatCompletion; // The following example shows how to use Semantic Kernel with Azure OpenAI API public class AzureOpenAI_ChatCompletion(ITestOutputHelper output) : BaseTest(output) { + /// + /// Sample showing how to use with chat completion and chat prompt syntax. + /// [Fact] public async Task ChatPromptAsync() { + Console.WriteLine("======== Azure Open AI - Chat Completion ========"); + Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName); Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint); @@ -39,8 +44,8 @@ public async Task ChatPromptAsync() apiKey: TestConfiguration.AzureOpenAI.ApiKey, modelId: TestConfiguration.AzureOpenAI.ChatModelId); } - var kernel = kernelBuilder.Build(); + var kernel = kernelBuilder.Build(); var reply = await kernel.InvokePromptAsync(chatPrompt.ToString()); chatPrompt.AppendLine($""); @@ -51,54 +56,30 @@ public async Task ChatPromptAsync() Console.WriteLine(reply); } + /// + /// Sample showing how to use directly with a . + /// [Fact] public async Task ServicePromptAsync() { Console.WriteLine("======== Azure Open AI - Chat Completion ========"); + Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName); + Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint); + AzureOpenAIChatCompletionService chatCompletionService = - string.IsNullOrEmpty(TestConfiguration.AzureOpenAI.ApiKey) ? - new( + string.IsNullOrEmpty(TestConfiguration.AzureOpenAI.ApiKey) + ? 
new( deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, endpoint: TestConfiguration.AzureOpenAI.Endpoint, credentials: new DefaultAzureCredential(), - modelId: TestConfiguration.AzureOpenAI.ChatModelId) : - new( - deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, - endpoint: TestConfiguration.AzureOpenAI.Endpoint, - apiKey: TestConfiguration.AzureOpenAI.ApiKey, - modelId: TestConfiguration.AzureOpenAI.ChatModelId); - - await StartChatAsync(chatCompletionService); - } - - /// - /// Sample showing how to use Azure Open AI Chat Completion with Azure Default Credential. - /// If local auth is disabled in the Azure Open AI deployment, you can use Azure Default Credential to authenticate. - /// - [Fact] - public async Task DefaultAzureCredentialSampleAsync() - { - Console.WriteLine("======== Azure Open AI - Chat Completion with Azure Default Credential ========"); - - AzureOpenAIChatCompletionService chatCompletionService = - string.IsNullOrEmpty(TestConfiguration.AzureOpenAI.ApiKey) ? 
- new( + modelId: TestConfiguration.AzureOpenAI.ChatModelId) + : new( deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, endpoint: TestConfiguration.AzureOpenAI.Endpoint, - credentials: new DefaultAzureCredential(), - modelId: TestConfiguration.AzureOpenAI.ChatModelId) : - new( - deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, - endpoint: TestConfiguration.AzureOpenAI.Endpoint, - apiKey: TestConfiguration.AzureOpenAI.ApiKey, - modelId: TestConfiguration.AzureOpenAI.ChatModelId); - - await StartChatAsync(chatCompletionService); - } + apiKey: TestConfiguration.AzureOpenAI.ApiKey, + modelId: TestConfiguration.AzureOpenAI.ChatModelId); - private async Task StartChatAsync(IChatCompletionService chatGPT) - { Console.WriteLine("Chat content:"); Console.WriteLine("------------------------"); @@ -109,7 +90,7 @@ private async Task StartChatAsync(IChatCompletionService chatGPT) OutputLastMessage(chatHistory); // First assistant message - var reply = await chatGPT.GetChatMessageContentAsync(chatHistory); + var reply = await chatCompletionService.GetChatMessageContentAsync(chatHistory); chatHistory.Add(reply); OutputLastMessage(chatHistory); @@ -118,7 +99,7 @@ private async Task StartChatAsync(IChatCompletionService chatGPT) OutputLastMessage(chatHistory); // Second assistant message - reply = await chatGPT.GetChatMessageContentAsync(chatHistory); + reply = await chatCompletionService.GetChatMessageContentAsync(chatHistory); chatHistory.Add(reply); OutputLastMessage(chatHistory); } diff --git a/dotnet/samples/Concepts/ChatCompletion/AzureOpenAI_ChatCompletion_WithReasoning.cs b/dotnet/samples/Concepts/ChatCompletion/AzureOpenAI_ChatCompletion_WithReasoning.cs new file mode 100644 index 000000000000..6d8bebb4f27e --- /dev/null +++ b/dotnet/samples/Concepts/ChatCompletion/AzureOpenAI_ChatCompletion_WithReasoning.cs @@ -0,0 +1,100 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Text; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.ChatCompletion; +using Microsoft.SemanticKernel.Connectors.AzureOpenAI; +using OpenAI.Chat; + +namespace ChatCompletion; + +// The following example shows how to use Semantic Kernel with Azure OpenAI API +public class AzureOpenAI_ChatCompletion_WithReasoning(ITestOutputHelper output) : BaseTest(output) +{ + /// + /// Sample showing how to use with chat completion and chat prompt syntax. + /// + [Fact] + public async Task ChatPromptWithReasoningAsync() + { + Console.WriteLine("======== Azure Open AI - Chat Completion with Reasoning ========"); + + Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName); + Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint); + Assert.NotNull(TestConfiguration.AzureOpenAI.ApiKey); + + var kernel = Kernel.CreateBuilder() + .AddAzureOpenAIChatCompletion( + deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, + endpoint: TestConfiguration.AzureOpenAI.Endpoint, + apiKey: TestConfiguration.AzureOpenAI.ApiKey, + modelId: TestConfiguration.AzureOpenAI.ChatModelId) + .Build(); + + // Create execution settings with high reasoning effort. + var executionSettings = new AzureOpenAIPromptExecutionSettings //OpenAIPromptExecutionSettings + { + // Flags Azure SDK to use the new token property. + SetNewMaxCompletionTokensEnabled = true, + MaxTokens = 2000, + // Note: reasoning effort is only available for reasoning models (at this moment o3-mini & o1 models) + ReasoningEffort = ChatReasoningEffortLevel.Low + }; + + // Create KernelArguments using the execution settings. + var kernelArgs = new KernelArguments(executionSettings); + + StringBuilder chatPrompt = new(""" + You are an expert software engineer, specialized in the Semantic Kernel SDK and NET framework + Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop . + """); + + // Invoke the prompt with high reasoning effort. 
+ var reply = await kernel.InvokePromptAsync(chatPrompt.ToString(), kernelArgs); + + Console.WriteLine(reply); + } + + /// + /// Sample showing how to use directly with a . + /// + [Fact] + public async Task ServicePromptWithReasoningAsync() + { + Console.WriteLine("======== Azure Open AI - Chat Completion with Azure Default Credential with Reasoning ========"); + + Assert.NotNull(TestConfiguration.AzureOpenAI.ChatDeploymentName); + Assert.NotNull(TestConfiguration.AzureOpenAI.Endpoint); + Assert.NotNull(TestConfiguration.AzureOpenAI.ApiKey); + + IChatCompletionService chatCompletionService = new AzureOpenAIChatCompletionService( + deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, + endpoint: TestConfiguration.AzureOpenAI.Endpoint, + apiKey: TestConfiguration.AzureOpenAI.ApiKey, + modelId: TestConfiguration.AzureOpenAI.ChatModelId); + + // Create execution settings with high reasoning effort. + var executionSettings = new AzureOpenAIPromptExecutionSettings + { + // Flags Azure SDK to use the new token property. + SetNewMaxCompletionTokensEnabled = true, + MaxTokens = 2000, + // Note: reasoning effort is only available for reasoning models (at this moment o3-mini & o1 models) + ReasoningEffort = ChatReasoningEffortLevel.Low + }; + + // Create a ChatHistory and add messages. + var chatHistory = new ChatHistory(); + chatHistory.AddDeveloperMessage( + "You are an expert software engineer, specialized in the Semantic Kernel SDK and .NET framework."); + chatHistory.AddUserMessage( + "Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop."); + + // Instead of a prompt string, call GetChatMessageContentAsync with the chat history. 
+ var reply = await chatCompletionService.GetChatMessageContentAsync( + chatHistory: chatHistory, + executionSettings: executionSettings); + + Console.WriteLine(reply); + } +} diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs index 22fb6dbd82f5..cf4a571d38c4 100644 --- a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs +++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs @@ -10,6 +10,9 @@ namespace ChatCompletion; // The following example shows how to use Semantic Kernel with OpenAI API public class OpenAI_ChatCompletion(ITestOutputHelper output) : BaseTest(output) { + /// + /// Sample showing how to use directly with a . + /// [Fact] public async Task ServicePromptAsync() { @@ -23,6 +26,10 @@ public async Task ServicePromptAsync() await StartChatAsync(chatCompletionService); } + /// + /// Sample showing how to use directly with a also exploring the + /// breaking glass approach capturing the underlying instance via . + /// [Fact] public async Task ServicePromptWithInnerContentAsync() { @@ -51,6 +58,9 @@ public async Task ServicePromptWithInnerContentAsync() OutputInnerContent(replyInnerContent!); } + /// + /// Sample showing how to use with chat completion and chat prompt syntax. + /// [Fact] public async Task ChatPromptAsync() { diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion_WithReasoning.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion_WithReasoning.cs new file mode 100644 index 000000000000..547df991744c --- /dev/null +++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion_WithReasoning.cs @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Text; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.ChatCompletion; +using Microsoft.SemanticKernel.Connectors.OpenAI; +using OpenAI.Chat; + +namespace ChatCompletion; + +// The following example shows how to use Semantic Kernel with OpenAI API +public class OpenAI_ChatCompletion_WithReasoning(ITestOutputHelper output) : BaseTest(output) +{ + /// + /// Sample showing how to use with chat completion and chat prompt syntax. + /// + [Fact] + public async Task ChatPromptWithReasoningAsync() + { + Console.WriteLine("======== Open AI - Chat Completion with Reasoning ========"); + + Assert.NotNull(TestConfiguration.OpenAI.ChatModelId); + Assert.NotNull(TestConfiguration.OpenAI.ApiKey); + + var kernel = Kernel.CreateBuilder() + .AddOpenAIChatCompletion( + modelId: TestConfiguration.OpenAI.ChatModelId, + apiKey: TestConfiguration.OpenAI.ApiKey) + .Build(); + + // Create execution settings with low reasoning effort. + var executionSettings = new OpenAIPromptExecutionSettings //OpenAIPromptExecutionSettings + { + MaxTokens = 2000, + ReasoningEffort = ChatReasoningEffortLevel.Low // Only available for reasoning models (i.e: o3-mini, o1, ...) + }; + + // Create KernelArguments using the execution settings. + var kernelArgs = new KernelArguments(executionSettings); + + StringBuilder chatPrompt = new(""" + You are an expert software engineer, specialized in the Semantic Kernel SDK and NET framework + Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop . + """); + + // Invoke the prompt with high reasoning effort. + var reply = await kernel.InvokePromptAsync(chatPrompt.ToString(), kernelArgs); + + Console.WriteLine(reply); + } + + /// + /// Sample showing how to use directly with a . 
+ /// + [Fact] + public async Task ServicePromptWithReasoningAsync() + { + Assert.NotNull(TestConfiguration.OpenAI.ChatModelId); + Assert.NotNull(TestConfiguration.OpenAI.ApiKey); + + Console.WriteLine("======== Open AI - Chat Completion with Reasoning ========"); + + OpenAIChatCompletionService chatCompletionService = new(TestConfiguration.OpenAI.ChatModelId, TestConfiguration.OpenAI.ApiKey); + + // Create execution settings with low reasoning effort. + var executionSettings = new OpenAIPromptExecutionSettings + { + MaxTokens = 2000, + ReasoningEffort = ChatReasoningEffortLevel.Low // Only available for reasoning models (i.e: o3-mini, o1, ...) + }; + + // Create a ChatHistory and add messages. + var chatHistory = new ChatHistory(); + chatHistory.AddDeveloperMessage( + "You are an expert software engineer, specialized in the Semantic Kernel SDK and .NET framework."); + chatHistory.AddUserMessage( + "Hi, Please craft me an example code in .NET using Semantic Kernel that implements a chat loop."); + + // Instead of a prompt string, call GetChatMessageContentAsync with the chat history. + var reply = await chatCompletionService.GetChatMessageContentAsync( + chatHistory: chatHistory, + executionSettings: executionSettings); + + Console.WriteLine(reply); + } +}