Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "2.9.0"
".": "2.10.0"
}
6 changes: 3 additions & 3 deletions .stats.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
configured_endpoints: 137
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-fe8a79e6fd407e6c9afec60971f03076b65f711ccd6ea16457933b0e24fb1f6d.yml
openapi_spec_hash: 38c0a73f4e08843732c5f8002a809104
config_hash: 2c350086d87a4b4532077363087840e7
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-88d85ff87ad8983262af2b729762a6e05fd509468bb691529bc2f81e4ce27c69.yml
openapi_spec_hash: 46a55acbccd0147534017b92c1f4dd99
config_hash: 141b101c9f13b90e21af74e1686f1f41
19 changes: 19 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,24 @@
# Changelog

## 2.10.0 (2025-12-10)

Full Changelog: [v2.9.0...v2.10.0](https://github.com/openai/openai-python/compare/v2.9.0...v2.10.0)

### Features

* **api:** make model required for the responses/compact endpoint ([a12936b](https://github.com/openai/openai-python/commit/a12936b18cf19009d4e6d586c9b1958359636dbe))


### Bug Fixes

* **types:** allow pyright to infer TypedDict types within SequenceNotStr ([8f0d230](https://github.com/openai/openai-python/commit/8f0d23066c1edc38a6e9858b054dceaf92ae001b))


### Chores

* add missing docstrings ([f20a9a1](https://github.com/openai/openai-python/commit/f20a9a18a421ba69622c77ab539509d218e774eb))
* **internal:** update docstring ([9a993f2](https://github.com/openai/openai-python/commit/9a993f2261b6524aa30b955e006c7ea89f086968))

## 2.9.0 (2025-12-04)

Full Changelog: [v2.8.1...v2.9.0](https://github.com/openai/openai-python/compare/v2.8.1...v2.9.0)
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "2.9.0"
version = "2.10.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
Expand Down
5 changes: 3 additions & 2 deletions src/openai/_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -247,6 +247,9 @@ class HttpxSendArgs(TypedDict, total=False):
if TYPE_CHECKING:
# This works because str.__contains__ does not accept object (either in typeshed or at runtime)
# https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285
#
# Note: index() and count() methods are intentionally omitted to allow pyright to properly
# infer TypedDict types when dict literals are used in lists assigned to SequenceNotStr.
class SequenceNotStr(Protocol[_T_co]):
@overload
def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
Expand All @@ -255,8 +258,6 @@ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
def __contains__(self, value: object, /) -> bool: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[_T_co]: ...
def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ...
def count(self, value: Any, /) -> int: ...
def __reversed__(self) -> Iterator[_T_co]: ...
else:
# just point this to a normal `Sequence` at runtime to avoid having to special case
Expand Down
2 changes: 1 addition & 1 deletion src/openai/_version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "2.9.0" # x-release-please-version
__version__ = "2.10.0" # x-release-please-version
4 changes: 2 additions & 2 deletions src/openai/resources/realtime/realtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -232,7 +232,7 @@ def calls(self) -> AsyncCallsWithStreamingResponse:


class AsyncRealtimeConnection:
"""Represents a live websocket connection to the Realtime API"""
"""Represents a live WebSocket connection to the Realtime API"""

session: AsyncRealtimeSessionResource
response: AsyncRealtimeResponseResource
Expand Down Expand Up @@ -421,7 +421,7 @@ async def __aexit__(


class RealtimeConnection:
"""Represents a live websocket connection to the Realtime API"""
"""Represents a live WebSocket connection to the Realtime API"""

session: RealtimeSessionResource
response: RealtimeResponseResource
Expand Down
42 changes: 20 additions & 22 deletions src/openai/resources/responses/responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -1526,8 +1526,6 @@ def cancel(
def compact(
self,
*,
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
instructions: Optional[str] | Omit = omit,
model: Union[
Literal[
"gpt-5.1",
Expand Down Expand Up @@ -1614,8 +1612,9 @@ def compact(
],
str,
None,
]
| Omit = omit,
],
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
instructions: Optional[str] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
Expand All @@ -1628,19 +1627,19 @@ def compact(
Compact conversation

Args:
model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.

input: Text, image, or file inputs to the model, used to generate a response

instructions: A system (or developer) message inserted into the model's context. When used
along with `previous_response_id`, the instructions from a previous response
will not be carried over to the next response. This makes it simple to swap out
system (or developer) messages in new responses.

model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.

previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Expand All @@ -1658,9 +1657,9 @@ def compact(
"/responses/compact",
body=maybe_transform(
{
"model": model,
"input": input,
"instructions": instructions,
"model": model,
"previous_response_id": previous_response_id,
},
response_compact_params.ResponseCompactParams,
Expand Down Expand Up @@ -3140,8 +3139,6 @@ async def cancel(
async def compact(
self,
*,
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
instructions: Optional[str] | Omit = omit,
model: Union[
Literal[
"gpt-5.1",
Expand Down Expand Up @@ -3228,8 +3225,9 @@ async def compact(
],
str,
None,
]
| Omit = omit,
],
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
instructions: Optional[str] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
Expand All @@ -3242,19 +3240,19 @@ async def compact(
Compact conversation

Args:
model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.

input: Text, image, or file inputs to the model, used to generate a response

instructions: A system (or developer) message inserted into the model's context. When used
along with `previous_response_id`, the instructions from a previous response
will not be carried over to the next response. This makes it simple to swap out
system (or developer) messages in new responses.

model: Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.

previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Expand All @@ -3272,9 +3270,9 @@ async def compact(
"/responses/compact",
body=await async_maybe_transform(
{
"model": model,
"input": input,
"instructions": instructions,
"model": model,
"previous_response_id": previous_response_id,
},
response_compact_params.ResponseCompactParams,
Expand Down
10 changes: 10 additions & 0 deletions src/openai/types/audio/transcription.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ class Logprob(BaseModel):


class UsageTokensInputTokenDetails(BaseModel):
"""Details about the input tokens billed for this request."""

audio_tokens: Optional[int] = None
"""Number of audio tokens billed for this request."""

Expand All @@ -29,6 +31,8 @@ class UsageTokensInputTokenDetails(BaseModel):


class UsageTokens(BaseModel):
"""Usage statistics for models billed by token usage."""

input_tokens: int
"""Number of input tokens billed for this request."""

Expand All @@ -46,6 +50,8 @@ class UsageTokens(BaseModel):


class UsageDuration(BaseModel):
"""Usage statistics for models billed by audio input duration."""

seconds: float
"""Duration of the input audio in seconds."""

Expand All @@ -57,6 +63,10 @@ class UsageDuration(BaseModel):


class Transcription(BaseModel):
"""
Represents a transcription response returned by the model, based on the provided input.
"""

text: str
"""The transcribed text."""

Expand Down
10 changes: 10 additions & 0 deletions src/openai/types/audio/transcription_diarized.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@


class UsageTokensInputTokenDetails(BaseModel):
"""Details about the input tokens billed for this request."""

audio_tokens: Optional[int] = None
"""Number of audio tokens billed for this request."""

Expand All @@ -19,6 +21,8 @@ class UsageTokensInputTokenDetails(BaseModel):


class UsageTokens(BaseModel):
"""Usage statistics for models billed by token usage."""

input_tokens: int
"""Number of input tokens billed for this request."""

Expand All @@ -36,6 +40,8 @@ class UsageTokens(BaseModel):


class UsageDuration(BaseModel):
"""Usage statistics for models billed by audio input duration."""

seconds: float
"""Duration of the input audio in seconds."""

Expand All @@ -47,6 +53,10 @@ class UsageDuration(BaseModel):


class TranscriptionDiarized(BaseModel):
"""
Represents a diarized transcription response returned by the model, including the combined transcript and speaker-segment annotations.
"""

duration: float
"""Duration of the input audio in seconds."""

Expand Down
2 changes: 2 additions & 0 deletions src/openai/types/audio/transcription_diarized_segment.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@


class TranscriptionDiarizedSegment(BaseModel):
"""A segment of diarized transcript text with speaker metadata."""

id: str
"""Unique identifier for the segment."""

Expand Down
5 changes: 5 additions & 0 deletions src/openai/types/audio/transcription_text_delta_event.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,11 @@ class Logprob(BaseModel):


class TranscriptionTextDeltaEvent(BaseModel):
"""Emitted when there is an additional text delta.

This is also the first event emitted when the transcription starts. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `Stream` parameter set to `true`.
"""

delta: str
"""The text delta that was additionally transcribed."""

Expand Down
9 changes: 9 additions & 0 deletions src/openai/types/audio/transcription_text_done_event.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ class Logprob(BaseModel):


class UsageInputTokenDetails(BaseModel):
"""Details about the input tokens billed for this request."""

audio_tokens: Optional[int] = None
"""Number of audio tokens billed for this request."""

Expand All @@ -28,6 +30,8 @@ class UsageInputTokenDetails(BaseModel):


class Usage(BaseModel):
"""Usage statistics for models billed by token usage."""

input_tokens: int
"""Number of input tokens billed for this request."""

Expand All @@ -45,6 +49,11 @@ class Usage(BaseModel):


class TranscriptionTextDoneEvent(BaseModel):
"""Emitted when the transcription is complete.

Contains the complete transcription text. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `Stream` parameter set to `true`.
"""

text: str
"""The text that was transcribed."""

Expand Down
4 changes: 4 additions & 0 deletions src/openai/types/audio/transcription_text_segment_event.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,10 @@


class TranscriptionTextSegmentEvent(BaseModel):
"""
Emitted when a diarized transcription returns a completed segment with speaker information. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with `stream` set to `true` and `response_format` set to `diarized_json`.
"""

id: str
"""Unique identifier for the segment."""

Expand Down
6 changes: 6 additions & 0 deletions src/openai/types/audio/transcription_verbose.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@


class Usage(BaseModel):
"""Usage statistics for models billed by audio input duration."""

seconds: float
"""Duration of the input audio in seconds."""

Expand All @@ -19,6 +21,10 @@ class Usage(BaseModel):


class TranscriptionVerbose(BaseModel):
"""
Represents a verbose JSON transcription response returned by the model, based on the provided input.
"""

duration: float
"""The duration of the input audio."""

Expand Down
5 changes: 5 additions & 0 deletions src/openai/types/auto_file_chunking_strategy_param.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,5 +8,10 @@


class AutoFileChunkingStrategyParam(TypedDict, total=False):
"""The default strategy.

This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.
"""

type: Required[Literal["auto"]]
"""Always `auto`."""
4 changes: 4 additions & 0 deletions src/openai/types/batch_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,10 @@ class BatchCreateParams(TypedDict, total=False):


class OutputExpiresAfter(TypedDict, total=False):
"""
The expiration policy for the output and/or error file that are generated for a batch.
"""

anchor: Required[Literal["created_at"]]
"""Anchor timestamp after which the expiration policy applies.

Expand Down
2 changes: 2 additions & 0 deletions src/openai/types/batch_request_counts.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@


class BatchRequestCounts(BaseModel):
"""The request counts for different statuses within the batch."""

completed: int
"""Number of requests that have been completed successfully."""

Expand Down
Loading
Loading