-
Notifications
You must be signed in to change notification settings - Fork 4.5k
/
ChatChoice.cs
132 lines (124 loc) · 7.81 KB
/
ChatChoice.cs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// <auto-generated/>
#nullable disable
using System;
using System.Collections.Generic;
namespace Azure.AI.OpenAI
{
    /// <summary>
    /// The representation of a single prompt completion as part of an overall chat completions request.
    /// Generally, `n` choices are generated per provided prompt with a default value of 1.
    /// Token limits and other settings may limit the number of choices generated.
    /// </summary>
    public partial class ChatChoice
    {
        /// <summary>
        /// Keeps track of any properties unknown to the library.
        /// <para>
        /// To assign an object to the value of this property use <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions?)"/>.
        /// </para>
        /// <para>
        /// To assign an already formatted json string to this property use <see cref="BinaryData.FromString(string)"/>.
        /// </para>
        /// <para>
        /// Examples:
        /// <list type="bullet">
        /// <item>
        /// <term>BinaryData.FromObjectAsJson("foo")</term>
        /// <description>Creates a payload of "foo".</description>
        /// </item>
        /// <item>
        /// <term>BinaryData.FromString("\"foo\"")</term>
        /// <description>Creates a payload of "foo".</description>
        /// </item>
        /// <item>
        /// <term>BinaryData.FromObjectAsJson(new { key = "value" })</term>
        /// <description>Creates a payload of { "key": "value" }.</description>
        /// </item>
        /// <item>
        /// <term>BinaryData.FromString("{\"key\": \"value\"}")</term>
        /// <description>Creates a payload of { "key": "value" }.</description>
        /// </item>
        /// </list>
        /// </para>
        /// </summary>
        private IDictionary<string, BinaryData> _serializedAdditionalRawData;

        /// <summary> Initializes a new instance of <see cref="ChatChoice"/>. </summary>
        /// <param name="logProbabilityInfo"> The log probability information for this choice, as enabled via the 'logprobs' request option. </param>
        /// <param name="index"> The ordered index associated with this chat completions choice. </param>
        /// <param name="finishReason"> The reason that this chat completions choice completed its generation. </param>
        internal ChatChoice(ChatChoiceLogProbabilityInfo logProbabilityInfo, int index, CompletionsFinishReason? finishReason)
        {
            LogProbabilityInfo = logProbabilityInfo;
            Index = index;
            FinishReason = finishReason;
        }

        /// <summary> Initializes a new instance of <see cref="ChatChoice"/>. </summary>
        /// <param name="message"> The chat message for a given chat completions prompt. </param>
        /// <param name="logProbabilityInfo"> The log probability information for this choice, as enabled via the 'logprobs' request option. </param>
        /// <param name="index"> The ordered index associated with this chat completions choice. </param>
        /// <param name="finishReason"> The reason that this chat completions choice completed its generation. </param>
        /// <param name="finishDetails">
        /// The reason the model stopped generating tokens, together with any applicable details.
        /// This structured representation replaces 'finish_reason' for some models.
        /// Please note <see cref="ChatFinishDetails"/> is the base class. According to the scenario, a derived class of the base class might need to be assigned here, or this property needs to be casted to one of the possible derived classes.
        /// The available derived classes include <see cref="MaxTokensFinishDetails"/> and <see cref="StopFinishDetails"/>.
        /// </param>
        /// <param name="internalStreamingDeltaMessage"> The delta message content for a streaming response. </param>
        /// <param name="contentFilterResults">
        /// Information about the content filtering category (hate, sexual, violence, self_harm), if it
        /// has been detected, as well as the severity level (very_low, low, medium, high-scale that
        /// determines the intensity and risk level of harmful content) and if it has been filtered or not.
        /// </param>
        /// <param name="enhancements">
        /// Represents the output results of Azure OpenAI enhancements to chat completions, as configured via the matching input
        /// provided in the request. This supplementary information is only available when using Azure OpenAI and only when the
        /// request is configured to use enhancements.
        /// </param>
        /// <param name="serializedAdditionalRawData"> Keeps track of any properties unknown to the library. </param>
        internal ChatChoice(ChatResponseMessage message, ChatChoiceLogProbabilityInfo logProbabilityInfo, int index, CompletionsFinishReason? finishReason, ChatFinishDetails finishDetails, ChatResponseMessage internalStreamingDeltaMessage, ContentFilterResultsForChoice contentFilterResults, AzureChatEnhancements enhancements, IDictionary<string, BinaryData> serializedAdditionalRawData)
        {
            Message = message;
            LogProbabilityInfo = logProbabilityInfo;
            Index = index;
            FinishReason = finishReason;
            FinishDetails = finishDetails;
            // NOTE(review): the InternalStreamingDeltaMessage property is not declared in this file;
            // this class is 'partial', so it is presumably declared in another partial declaration — confirm there.
            InternalStreamingDeltaMessage = internalStreamingDeltaMessage;
            ContentFilterResults = contentFilterResults;
            Enhancements = enhancements;
            _serializedAdditionalRawData = serializedAdditionalRawData;
        }

        /// <summary> Initializes a new instance of <see cref="ChatChoice"/> for deserialization. </summary>
        internal ChatChoice()
        {
        }

        /// <summary> The chat message for a given chat completions prompt. </summary>
        public ChatResponseMessage Message { get; }

        /// <summary> The log probability information for this choice, as enabled via the 'logprobs' request option. </summary>
        public ChatChoiceLogProbabilityInfo LogProbabilityInfo { get; }

        /// <summary> The ordered index associated with this chat completions choice. </summary>
        public int Index { get; }

        /// <summary> The reason that this chat completions choice completed its generation. </summary>
        public CompletionsFinishReason? FinishReason { get; }

        /// <summary>
        /// The reason the model stopped generating tokens, together with any applicable details.
        /// This structured representation replaces 'finish_reason' for some models.
        /// Please note <see cref="ChatFinishDetails"/> is the base class. According to the scenario, a derived class of the base class might need to be assigned here, or this property needs to be casted to one of the possible derived classes.
        /// The available derived classes include <see cref="MaxTokensFinishDetails"/> and <see cref="StopFinishDetails"/>.
        /// </summary>
        public ChatFinishDetails FinishDetails { get; }

        /// <summary>
        /// Information about the content filtering category (hate, sexual, violence, self_harm), if it
        /// has been detected, as well as the severity level (very_low, low, medium, high-scale that
        /// determines the intensity and risk level of harmful content) and if it has been filtered or not.
        /// </summary>
        public ContentFilterResultsForChoice ContentFilterResults { get; }

        /// <summary>
        /// Represents the output results of Azure OpenAI enhancements to chat completions, as configured via the matching input
        /// provided in the request. This supplementary information is only available when using Azure OpenAI and only when the
        /// request is configured to use enhancements.
        /// </summary>
        public AzureChatEnhancements Enhancements { get; }
    }
}