12 changes: 11 additions & 1 deletion api/OpenAI.net8.0.cs
@@ -1723,6 +1723,8 @@ public class ChatCompletionOptions : IJsonModel<ChatCompletionOptions>, IPersist
[Experimental("OPENAI001")]
public ChatResponseModalities ResponseModalities { get; set; }
[Experimental("OPENAI001")]
public string SafetyIdentifier { get; set; }
[Experimental("OPENAI001")]
public long? Seed { get; set; }
[Experimental("OPENAI001")]
public ChatServiceTier? ServiceTier { get; set; }
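
The new `SafetyIdentifier` option binds to the `safety_identifier` request field. A minimal sketch of setting it on a chat call — the `ChatClient` setup and the placeholder identifier are assumptions, not part of this diff:

```csharp
using OpenAI.Chat;

ChatClient client = new(
    model: "gpt-4o",
    apiKey: Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

ChatCompletionOptions options = new()
{
    // Stable, non-identifying handle for the end user (hypothetical value).
    SafetyIdentifier = "user-7f3e2a19",
};

ChatCompletion completion = client.CompleteChat(
    [new UserChatMessage("Hello!")],
    options);

Console.WriteLine(completion.Content[0].Text);
```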
@@ -5591,13 +5593,15 @@ public enum MessageStatus {
[Experimental("OPENAI001")]
public class OpenAIResponse : IJsonModel<OpenAIResponse>, IPersistableModel<OpenAIResponse> {
public bool? BackgroundModeEnabled { get; }
public string ConversationId { get; }
public DateTimeOffset CreatedAt { get; }
public string EndUserId { get; }
public ResponseError Error { get; }
public string Id { get; }
public ResponseIncompleteStatusDetails IncompleteStatusDetails { get; }
public string Instructions { get; }
public int? MaxOutputTokenCount { get; }
public int? MaxToolCallCount { get; }
public IDictionary<string, string> Metadata { get; }
public string Model { get; }
public IList<ResponseItem> OutputItems { get; }
@@ -5608,12 +5612,14 @@ public class OpenAIResponse : IJsonModel<OpenAIResponse>, IPersistableModel<Open
public ref JsonPatch Patch { get; }
public string PreviousResponseId { get; }
public ResponseReasoningOptions ReasoningOptions { get; }
public string SafetyIdentifier { get; }
public ResponseServiceTier? ServiceTier { get; }
public ResponseStatus? Status { get; }
public float? Temperature { get; }
public ResponseTextOptions TextOptions { get; }
public ResponseToolChoice ToolChoice { get; }
public IList<ResponseTool> Tools { get; }
public int? TopLogProbabilityCount { get; }
public float? TopP { get; }
public ResponseTruncationMode? TruncationMode { get; }
public ResponseTokenUsage Usage { get; }
@@ -5670,7 +5676,7 @@ public class OpenAIResponseClient {
[Experimental("OPENAI001")]
public static class OpenAIResponsesModelFactory {
public static MessageResponseItem MessageResponseItem(string id = null, MessageRole role = MessageRole.Assistant, MessageStatus? status = null);
public static OpenAIResponse OpenAIResponse(string id = null, DateTimeOffset createdAt = default, ResponseStatus? status = null, ResponseError error = null, ResponseTokenUsage usage = null, string endUserId = null, ResponseReasoningOptions reasoningOptions = null, int? maxOutputTokenCount = null, ResponseTextOptions textOptions = null, ResponseTruncationMode? truncationMode = null, ResponseIncompleteStatusDetails incompleteStatusDetails = null, IEnumerable<ResponseItem> outputItems = null, bool parallelToolCallsEnabled = false, ResponseToolChoice toolChoice = null, string model = null, IDictionary<string, string> metadata = null, float? temperature = null, float? topP = null, ResponseServiceTier? serviceTier = null, string previousResponseId = null, bool? backgroundModeEnabled = null, string instructions = null, IEnumerable<ResponseTool> tools = null);
public static OpenAIResponse OpenAIResponse(string id = null, DateTimeOffset createdAt = default, ResponseStatus? status = null, ResponseError error = null, ResponseTokenUsage usage = null, string endUserId = null, string safetyIdentifier = null, ResponseReasoningOptions reasoningOptions = null, int? maxOutputTokenCount = null, int? maxToolCallCount = null, ResponseTextOptions textOptions = null, ResponseTruncationMode? truncationMode = null, ResponseIncompleteStatusDetails incompleteStatusDetails = null, IEnumerable<ResponseItem> outputItems = null, bool parallelToolCallsEnabled = false, ResponseToolChoice toolChoice = null, string model = null, IDictionary<string, string> metadata = null, float? temperature = null, int? topLogProbabilityCount = null, float? topP = null, ResponseServiceTier? serviceTier = null, string previousResponseId = null, bool? backgroundModeEnabled = null, string instructions = null, IEnumerable<ResponseTool> tools = null, string conversationId = null);
public static ReasoningResponseItem ReasoningResponseItem(string id = null, string encryptedContent = null, ReasoningStatus? status = null, IEnumerable<ReasoningSummaryPart> summaryParts = null);
public static ReasoningResponseItem ReasoningResponseItem(string id = null, string encryptedContent = null, ReasoningStatus? status = null, string summaryText = null);
public static ReferenceResponseItem ReferenceResponseItem(string id = null);
@@ -5764,10 +5770,12 @@ public enum ResponseContentPartKind {
[Experimental("OPENAI001")]
public class ResponseCreationOptions : IJsonModel<ResponseCreationOptions>, IPersistableModel<ResponseCreationOptions> {
public bool? BackgroundModeEnabled { get; set; }
public string ConversationId { get; set; }
public string EndUserId { get; set; }
public IList<IncludedResponseProperty> IncludedProperties { get; }
public string Instructions { get; set; }
public int? MaxOutputTokenCount { get; set; }
public int? MaxToolCallCount { get; set; }
public IDictionary<string, string> Metadata { get; }
public bool? ParallelToolCallsEnabled { get; set; }
[Serialization.JsonIgnore]
Expand All @@ -5776,12 +5784,14 @@ public class ResponseCreationOptions : IJsonModel<ResponseCreationOptions>, IPer
public ref JsonPatch Patch { get; }
public string PreviousResponseId { get; set; }
public ResponseReasoningOptions ReasoningOptions { get; set; }
public string SafetyIdentifier { get; set; }
public ResponseServiceTier? ServiceTier { get; set; }
public bool? StoredOutputEnabled { get; set; }
public float? Temperature { get; set; }
public ResponseTextOptions TextOptions { get; set; }
public ResponseToolChoice ToolChoice { get; set; }
public IList<ResponseTool> Tools { get; }
public int? TopLogProbabilityCount { get; set; }
public float? TopP { get; set; }
public ResponseTruncationMode? TruncationMode { get; set; }
protected virtual ResponseCreationOptions JsonModelCreateCore(ref Utf8JsonReader reader, ModelReaderWriterOptions options);
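
Taken together, the Responses-side additions expose four new request knobs on `ResponseCreationOptions`. A sketch of a call that uses them — the client construction and ID values are assumptions; note the surface is `[Experimental("OPENAI001")]`, so callers suppress that diagnostic:

```csharp
#pragma warning disable OPENAI001 // Responses surface is experimental
using OpenAI.Responses;

OpenAIResponseClient client = new(
    model: "gpt-4o",
    apiKey: Environment.GetEnvironmentVariable("OPENAI_API_KEY"));

ResponseCreationOptions options = new()
{
    ConversationId = "conv_abc123",     // server-side conversation to read from and append to
    SafetyIdentifier = "user-7f3e2a19", // stable, hashed end-user handle
    MaxToolCallCount = 4,               // cap across all built-in tool calls in this response
    TopLogProbabilityCount = 3,         // 0-20; top tokens per position, with logprobs enabled
};

OpenAIResponse response = client.CreateResponse("What's the weather like?", options);
Console.WriteLine(response.GetOutputText());
```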
11 changes: 10 additions & 1 deletion api/OpenAI.netstandard2.0.cs
@@ -1521,6 +1521,7 @@ public class ChatCompletionOptions : IJsonModel<ChatCompletionOptions>, IPersist
public ChatReasoningEffortLevel? ReasoningEffortLevel { get; set; }
public ChatResponseFormat ResponseFormat { get; set; }
public ChatResponseModalities ResponseModalities { get; set; }
public string SafetyIdentifier { get; set; }
public long? Seed { get; set; }
public ChatServiceTier? ServiceTier { get; set; }
public IList<string> StopSequences { get; }
@@ -4907,13 +4908,15 @@ public enum MessageStatus {
}
public class OpenAIResponse : IJsonModel<OpenAIResponse>, IPersistableModel<OpenAIResponse> {
public bool? BackgroundModeEnabled { get; }
public string ConversationId { get; }
public DateTimeOffset CreatedAt { get; }
public string EndUserId { get; }
public ResponseError Error { get; }
public string Id { get; }
public ResponseIncompleteStatusDetails IncompleteStatusDetails { get; }
public string Instructions { get; }
public int? MaxOutputTokenCount { get; }
public int? MaxToolCallCount { get; }
public IDictionary<string, string> Metadata { get; }
public string Model { get; }
public IList<ResponseItem> OutputItems { get; }
@@ -4923,12 +4926,14 @@ public class OpenAIResponse : IJsonModel<OpenAIResponse>, IPersistableModel<Open
public ref JsonPatch Patch { get; }
public string PreviousResponseId { get; }
public ResponseReasoningOptions ReasoningOptions { get; }
public string SafetyIdentifier { get; }
public ResponseServiceTier? ServiceTier { get; }
public ResponseStatus? Status { get; }
public float? Temperature { get; }
public ResponseTextOptions TextOptions { get; }
public ResponseToolChoice ToolChoice { get; }
public IList<ResponseTool> Tools { get; }
public int? TopLogProbabilityCount { get; }
public float? TopP { get; }
public ResponseTruncationMode? TruncationMode { get; }
public ResponseTokenUsage Usage { get; }
@@ -4981,7 +4986,7 @@ public class OpenAIResponseClient {
}
public static class OpenAIResponsesModelFactory {
public static MessageResponseItem MessageResponseItem(string id = null, MessageRole role = MessageRole.Assistant, MessageStatus? status = null);
public static OpenAIResponse OpenAIResponse(string id = null, DateTimeOffset createdAt = default, ResponseStatus? status = null, ResponseError error = null, ResponseTokenUsage usage = null, string endUserId = null, ResponseReasoningOptions reasoningOptions = null, int? maxOutputTokenCount = null, ResponseTextOptions textOptions = null, ResponseTruncationMode? truncationMode = null, ResponseIncompleteStatusDetails incompleteStatusDetails = null, IEnumerable<ResponseItem> outputItems = null, bool parallelToolCallsEnabled = false, ResponseToolChoice toolChoice = null, string model = null, IDictionary<string, string> metadata = null, float? temperature = null, float? topP = null, ResponseServiceTier? serviceTier = null, string previousResponseId = null, bool? backgroundModeEnabled = null, string instructions = null, IEnumerable<ResponseTool> tools = null);
public static OpenAIResponse OpenAIResponse(string id = null, DateTimeOffset createdAt = default, ResponseStatus? status = null, ResponseError error = null, ResponseTokenUsage usage = null, string endUserId = null, string safetyIdentifier = null, ResponseReasoningOptions reasoningOptions = null, int? maxOutputTokenCount = null, int? maxToolCallCount = null, ResponseTextOptions textOptions = null, ResponseTruncationMode? truncationMode = null, ResponseIncompleteStatusDetails incompleteStatusDetails = null, IEnumerable<ResponseItem> outputItems = null, bool parallelToolCallsEnabled = false, ResponseToolChoice toolChoice = null, string model = null, IDictionary<string, string> metadata = null, float? temperature = null, int? topLogProbabilityCount = null, float? topP = null, ResponseServiceTier? serviceTier = null, string previousResponseId = null, bool? backgroundModeEnabled = null, string instructions = null, IEnumerable<ResponseTool> tools = null, string conversationId = null);
public static ReasoningResponseItem ReasoningResponseItem(string id = null, string encryptedContent = null, ReasoningStatus? status = null, IEnumerable<ReasoningSummaryPart> summaryParts = null);
public static ReasoningResponseItem ReasoningResponseItem(string id = null, string encryptedContent = null, ReasoningStatus? status = null, string summaryText = null);
public static ReferenceResponseItem ReferenceResponseItem(string id = null);
@@ -5065,23 +5070,27 @@ public enum ResponseContentPartKind {
}
public class ResponseCreationOptions : IJsonModel<ResponseCreationOptions>, IPersistableModel<ResponseCreationOptions> {
public bool? BackgroundModeEnabled { get; set; }
public string ConversationId { get; set; }
public string EndUserId { get; set; }
public IList<IncludedResponseProperty> IncludedProperties { get; }
public string Instructions { get; set; }
public int? MaxOutputTokenCount { get; set; }
public int? MaxToolCallCount { get; set; }
public IDictionary<string, string> Metadata { get; }
public bool? ParallelToolCallsEnabled { get; set; }
[Serialization.JsonIgnore]
[EditorBrowsable(EditorBrowsableState.Never)]
public ref JsonPatch Patch { get; }
public string PreviousResponseId { get; set; }
public ResponseReasoningOptions ReasoningOptions { get; set; }
public string SafetyIdentifier { get; set; }
public ResponseServiceTier? ServiceTier { get; set; }
public bool? StoredOutputEnabled { get; set; }
public float? Temperature { get; set; }
public ResponseTextOptions TextOptions { get; set; }
public ResponseToolChoice ToolChoice { get; set; }
public IList<ResponseTool> Tools { get; }
public int? TopLogProbabilityCount { get; set; }
public float? TopP { get; set; }
public ResponseTruncationMode? TruncationMode { get; set; }
protected virtual ResponseCreationOptions JsonModelCreateCore(ref Utf8JsonReader reader, ModelReaderWriterOptions options);
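
The netstandard2.0 surface mirrors the net8.0 changes, including the extended `OpenAIResponsesModelFactory.OpenAIResponse` overload, so tests can fabricate responses that carry the new properties without a network call. A sketch using only parameters from the signature above (all values fabricated):

```csharp
using OpenAI.Responses;

OpenAIResponse fake = OpenAIResponsesModelFactory.OpenAIResponse(
    id: "resp_test_001",
    createdAt: DateTimeOffset.UtcNow,
    status: ResponseStatus.Completed,
    safetyIdentifier: "user-7f3e2a19",
    maxToolCallCount: 4,
    topLogProbabilityCount: 3,
    conversationId: "conv_abc123");

// The mocked values surface through the new read-only properties.
Console.WriteLine(fake.ConversationId);   // conv_abc123
Console.WriteLine(fake.MaxToolCallCount); // 4
```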
9 changes: 0 additions & 9 deletions specification/base/typespec/chat/models.tsp
@@ -192,15 +192,6 @@ model CreateChatCompletionRequest {
search_context_size?: WebSearchContextSize = "medium";
};

@doc("""
An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
""")
@minValue(0)
@maxValue(20)
top_logprobs?: int32 | null;

// Tool customization: apply a named union type
@doc("""
An object specifying the format that the model must output.
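
`top_logprobs` moves out of the chat-specific request model here and into the shared `ModelResponseProperties*` models below, so Chat Completions and Responses now define it in one place. The chat binding itself is unchanged; a sketch, assuming the existing `IncludeLogProbabilities`/`TopLogProbabilityCount` pairing on `ChatCompletionOptions`:

```csharp
ChatCompletionOptions options = new()
{
    IncludeLogProbabilities = true, // logprobs must be enabled for top_logprobs to apply
    TopLogProbabilityCount = 5,     // 0-20, per the @minValue/@maxValue bounds removed above
};
```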
14 changes: 14 additions & 0 deletions specification/base/typespec/common/models.tsp
@@ -249,6 +249,9 @@ model ModelResponsePropertiesForRequest {
@minValue(0)
@maxValue(2)
temperature?: float32 | null = 1;

/** An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. */
top_logprobs?: int32 | null;

@doc("""
An alternative to sampling with temperature, called nucleus sampling,
@@ -265,6 +268,10 @@
/** A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). */
user?: string;

/** A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.
The ID should be a string that uniquely identifies each user. We recommend hashing the username or email address to avoid sending any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). */
safety_identifier?: string;

service_tier?: ServiceTier;
}
model ModelResponsePropertiesForResponse {
@@ -278,6 +285,9 @@
@maxValue(2)
temperature: float32 | null;

/** An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. */
top_logprobs?: int32 | null;

@doc("""
An alternative to sampling with temperature, called nucleus sampling,
where the model considers the results of the tokens with top_p probability
@@ -293,6 +303,10 @@
/** A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). */
user: string | null;

/** A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.
The ID should be a string that uniquely identifies each user. We recommend hashing the username or email address to avoid sending any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). */
safety_identifier?: string;

service_tier?: ServiceTier;
}

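
The doc comment recommends hashing a username or email rather than sending it raw. One way to derive such a value — a BCL-only sketch, not part of the SDK:

```csharp
using System;
using System.Security.Cryptography;
using System.Text;

// Derives a stable safety identifier from an internal user key without
// transmitting the raw email address.
static string ToSafetyIdentifier(string userEmail)
{
    byte[] hash = SHA256.HashData(Encoding.UTF8.GetBytes(userEmail.Trim().ToLowerInvariant()));
    return "user-" + Convert.ToHexString(hash)[..16].ToLowerInvariant();
}
```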
25 changes: 25 additions & 0 deletions specification/base/typespec/responses/models.tsp
@@ -87,6 +87,25 @@ model CreateResponse {
* for more information.
*/
stream?: boolean | null = false;

/** The conversation that this response belongs to.
* Items from this conversation are prepended to input_items for this response request.
* Input items and output items from this response are automatically added to this conversation after this response completes. */
conversation?: ConversationParam | null;
}

/** The conversation that this response belongs to. Items from this conversation are prepended to `input_items` for this response request.
Input items and output items from this response are automatically added to this conversation after this response completes. */
union ConversationParam {
string,
`ConversationParam-2`,
}

/** The conversation that this response belongs to. */
@summary("Conversation object")
model `ConversationParam-2` {
/** The unique ID of the conversation. */
id: string;
}

model Response {
@@ -149,6 +168,9 @@

/** Whether to allow the model to run tool calls in parallel. */
parallel_tool_calls: boolean = true;

/** The conversation that this response belongs to. Input items and output items from this response are automatically added to this conversation. */
conversation?: `ConversationParam-2` | null;
}

model ResponseProperties {
@@ -178,6 +200,9 @@
/** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). */
max_output_tokens?: int32 | null;

/** The maximum number of total calls to built-in tools that can be processed in a response. This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored. */
max_tool_calls?: int32 | null;

@doc("""
Inserts a system (or developer) message as the first item in the model's context.

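
At the wire level, the fields added here serialize alongside the existing ones. A sketch of the JSON body these TypeSpec changes describe, sent through the protocol-layer overload and reusing the client from the earlier sketch — the IDs are placeholders, and per the `ConversationParam` union, `"conversation"` may equally be sent as the object form `{"id": "conv_abc123"}`:

```csharp
using System.ClientModel;

BinaryContent body = BinaryContent.Create(BinaryData.FromString("""
    {
      "model": "gpt-4o",
      "input": "What's the weather like?",
      "conversation": "conv_abc123",
      "max_tool_calls": 4,
      "safety_identifier": "user-7f3e2a19",
      "top_logprobs": 3
    }
    """));

ClientResult result = client.CreateResponse(body, options: null);
```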
2 changes: 2 additions & 0 deletions specification/client/responses.client.tsp
@@ -8,6 +8,8 @@ using TypeSpec.HttpClient.CSharp;

@@alternateType(CreateResponse.service_tier, DotNetResponseServiceTier);
@@alternateType(Response.service_tier, DotNetResponseServiceTier);
@@alternateType(CreateResponse.conversation, string);
@@clientName(CreateResponse.conversation, "ConversationId");

// ------------ ItemResources ------------
@@usage(ItemResource, Usage.input | Usage.output);
3 changes: 2 additions & 1 deletion src/Custom/Responses/Internal/GeneratorStubs.cs
@@ -121,4 +121,5 @@ namespace OpenAI.Responses;
[CodeGenType("DotNetCustomToolCallApprovalPolicyAlways")] internal partial class InternalDotNetCustomToolCallApprovalPolicyAlways { }
[CodeGenType("DotNetCustomToolCallApprovalPolicyNever")] internal partial class InternalDotNetCustomToolCallApprovalPolicyNever { }
[CodeGenType("UnknownCodeInterpreterToolOutput")] internal partial class InternalUnknownCodeInterpreterToolOutput {}
[CodeGenType("UnknownCodeInterpreterContainerConfiguration")] internal partial class InternalUnknownCodeInterpreterContainerConfiguration {}
[CodeGenType("UnknownCodeInterpreterContainerConfiguration")] internal partial class InternalUnknownCodeInterpreterContainerConfiguration {}
[CodeGenType("ConversationParam2")] internal partial class InternalConversation {}
10 changes: 10 additions & 0 deletions src/Custom/Responses/OpenAIResponse.cs
@@ -30,6 +30,10 @@ public partial class OpenAIResponse
[CodeGenMember("MaxOutputTokens")]
public int? MaxOutputTokenCount { get; }

// CUSTOM: Renamed.
[CodeGenMember("MaxToolCalls")]
public int? MaxToolCallCount { get; }

// CUSTOM: Renamed.
[CodeGenMember("Text")]
public ResponseTextOptions TextOptions { get; }
@@ -58,6 +62,12 @@ public partial class OpenAIResponse
[CodeGenMember("Model")]
public string Model { get; }

// CUSTOM: Flattened from the nested conversation object; null when the response has no conversation.
public string ConversationId => Conversation?.Id;

// CUSTOM: Renamed.
[CodeGenMember("TopLogprobs")]
public int? TopLogProbabilityCount { get; }

// CUSTOM: Made internal
internal string Object { get; } = "response";

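On the read side, the renamed members flatten the wire fields (`max_tool_calls`, `top_logprobs`) and the nested conversation object into top-level properties. A sketch of inspecting them on a retrieved response — the retrieval call is an assumption, not part of this file:

```csharp
OpenAIResponse response = client.GetResponse("resp_abc123");

// ConversationId flattens the nested conversation object's id and is null
// when the response was not created within a conversation.
Console.WriteLine($"Conversation:  {response.ConversationId ?? "(none)"}");
Console.WriteLine($"Tool call cap: {response.MaxToolCallCount?.ToString() ?? "(unset)"}");
Console.WriteLine($"Top logprobs:  {response.TopLogProbabilityCount?.ToString() ?? "(unset)"}");
```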