Skip to content

Commit

Permalink
updates from spec (#142)
Browse files Browse the repository at this point in the history
* latest spec

* fix doc comments from spec
  • Loading branch information
64bit committed Nov 10, 2023
1 parent ecf3640 commit 937d277
Show file tree
Hide file tree
Showing 4 changed files with 118 additions and 86 deletions.
2 changes: 1 addition & 1 deletion async-openai/src/types/assistants/run.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ use super::AssistantTools;
pub struct RunObject {
/// The identifier, which can be referenced in API endpoints.
pub id: String,
/// The object type, which is always `assistant.run`.
/// The object type, which is always `thread.run`.
pub object: String,
/// The Unix timestamp (in seconds) for when the run was created.
pub created_at: i32,
Expand Down
4 changes: 2 additions & 2 deletions async-openai/src/types/assistants/step.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ pub enum RunStepType {
pub struct RunStepObject {
/// The identifier, which can be referenced in API endpoints.
pub id: String,
/// The object type, which is always `assistant.run.step`.
/// The object type, which is always `thread.run.step`.
pub object: String,
/// The Unix timestamp (in seconds) for when the run step was created.
pub created_at: i32,
Expand All @@ -33,7 +33,7 @@ pub struct RunStepObject {
/// The type of run step, which can be either `message_creation` or `tool_calls`.
pub r#type: RunStepType,

/// The status of the run, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`.
/// The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`.
pub status: RunStatus,

/// The details of the run step.
Expand Down
9 changes: 8 additions & 1 deletion async-openai/src/types/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1410,7 +1410,11 @@ pub struct CreateChatCompletionRequest {
#[serde(skip_serializing_if = "Option::is_none")]
pub presence_penalty: Option<f32>, // min: -2.0, max: 2.0, default 0

/// An object specifying the format that the model must output. Used to enable JSON mode.
/// An object specifying the format that the model must output.
///
/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.
///
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in increased latency and appearance of a "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
#[serde(skip_serializing_if = "Option::is_none")]
pub response_format: Option<ChatCompletionResponseFormat>,

Expand Down Expand Up @@ -1575,6 +1579,9 @@ pub struct CreateChatCompletionStreamResponse {
pub created: u32,
/// The model used to generate the completion.
pub model: String,
/// This fingerprint represents the backend configuration that the model runs with.
/// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
pub system_fingerprint: Option<String>,
/// The object type, which is always `chat.completion.chunk`.
pub object: String,
}
Expand Down
Loading

0 comments on commit 937d277

Please sign in to comment.