
Text completion 3. (Legacy)


WARNING! This is a legacy OpenAI feature and it will be shut down on January 4th, 2024. For more information, see: https://platform.openai.com/docs/api-reference/completions

The last example in this topic demonstrates how you can also receive the answer in streamed mode.

This version works with IAsyncEnumerable, which is not supported in older .NET versions; a callback-based alternative is sketched after the example.

public static async Task Main(string[] args)
{
    using var host = Host.CreateDefaultBuilder(args)
        .ConfigureServices((builder, services) =>
        {
            services.AddForgeOpenAI(options => {
                options.AuthenticationInfo = builder
                    .Configuration["OpenAI:ApiKey"]!;
            });
        })
        .Build();

    IOpenAIService openAi = host.Services.GetService<IOpenAIService>()!;

    TextCompletionRequest request = new TextCompletionRequest();
    request.Prompt = "Write a C# code which demonstrate how to write some text into file";
    request.MaxTokens = 4096 - request.Prompt
        .Split(" ", StringSplitOptions.RemoveEmptyEntries).Length; // calculating max token
    request.Temperature = 0.1; // lower value means more precise answer

    Console.WriteLine(request.Prompt);

    // consume the streamed completion chunks as they arrive
    await foreach (HttpOperationResult<TextCompletionResponse> response in
        openAi.TextCompletionService.GetStreamAsync(request, CancellationToken.None))
    {
        if (response.IsSuccess)
        {
            Console.Write(response.Result?.Completions[0].Text);
        }
        else
        {
            Console.WriteLine(response); // on failure the operation result contains the error details
        }
    }

}
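
If your project targets an older .NET version without IAsyncEnumerable support, the same request can be streamed through a callback instead. Below is a minimal sketch, assuming a GetStreamAsync overload that takes an Action<HttpOperationResult<TextCompletionResponse>> receiver, the callback pattern used by the library's other streaming examples; verify the exact signature against the text completion service of the Forge.OpenAI version you use.

// Callback-based streaming (sketch): the receiver is invoked once per streamed chunk.
// NOTE: the GetStreamAsync(request, receiver, cancellationToken) overload is assumed here,
// check the text completion service API of your library version.
Action<HttpOperationResult<TextCompletionResponse>> receiver = response =>
{
    if (response.IsSuccess)
    {
        Console.Write(response.Result?.Completions[0].Text);
    }
    else
    {
        Console.WriteLine(response);
    }
};

await openAi.TextCompletionService.GetStreamAsync(request, receiver, CancellationToken.None);

The request object is built exactly as in the example above; only the way the streamed chunks are consumed differs.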