From afc2fb2e6e0dbc4cbb6633d155dad5cd00b9e72a Mon Sep 17 00:00:00 2001 From: "claude[bot]" <209825114+claude[bot]@users.noreply.github.com> Date: Thu, 7 May 2026 00:31:49 +0000 Subject: [PATCH] chore(openapi): Sync vendored OpenAPI spec with canonical schemas Re-run `task oas-download` so the local `openapi.yaml` matches the canonical spec. `ChatCompletionChoice.logprobs` now models the structured object (with `content` and `refusal` arrays of `ChatCompletionTokenLogprob`) and is no longer required; `FunctionObject.parameters.description` gains a sentence noting that omitting `parameters` defines a function with an empty parameter list, and trailing whitespace drift on `CreateChatCompletionRequest.reasoning_format.description` is picked up. `inference_gateway/models.py` is regenerated from the refreshed spec. Refs #6 Co-authored-by: Eden Reich --- inference_gateway/models.py | 6 +++++- openapi.yaml | 23 ++++++++++++++++++++--- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/inference_gateway/models.py b/inference_gateway/models.py index 9484e9b..75d6a73 100644 --- a/inference_gateway/models.py +++ b/inference_gateway/models.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: openapi.yaml -# timestamp: 2026-05-06T14:19:53+00:00 +# timestamp: 2026-05-07T00:31:10+00:00 from __future__ import annotations @@ -690,6 +690,10 @@ class ChatCompletionChoice(BaseModel): The index of the choice in the list of choices. """ message: Message + logprobs: Logprobs | None = None + """ + Log probability information for the choice. + """ class CreateChatCompletionResponse(BaseModel): diff --git a/openapi.yaml b/openapi.yaml index 331b8d3..d27e267 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -933,7 +933,7 @@ components: See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - documentation about the format. + documentation about the format. Omitting `parameters` defines a function with an empty parameter list. 
additionalProperties: true @@ -1023,7 +1023,7 @@ components: The format of the reasoning content. Can be `raw` or `parsed`. When specified as raw some reasoning models will output tags. - When specified as parsed the model will output the reasoning under + When specified as parsed the model will output the reasoning under `reasoning` or `reasoning_content` attribute. required: - model @@ -1072,11 +1072,28 @@ components: description: The index of the choice in the list of choices. message: $ref: '#/components/schemas/Message' + logprobs: + description: Log probability information for the choice. + type: object + nullable: true + properties: + content: + description: A list of message content tokens with log probability information. + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + refusal: + description: A list of message refusal tokens with log probability information. + type: array + items: + $ref: '#/components/schemas/ChatCompletionTokenLogprob' + required: + - content + - refusal required: - finish_reason - index - message - - logprobs ChatCompletionStreamChoice: type: object required: