Merged
2 changes: 1 addition & 1 deletion .release-please-manifest.json
@@ -1,3 +1,3 @@
{
".": "1.51.2"
".": "1.52.0"
}
2 changes: 1 addition & 1 deletion .stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 68
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-52b934aee6468039ec7f4ce046a282b5fbce114afc708e70f17121df654f71da.yml
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-8729aaa35436531ab453224af10e67f89677db8f350f0346bb3537489edea649.yml
8 changes: 8 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog

## 1.52.0 (2024-10-17)

Full Changelog: [v1.51.2...v1.52.0](https://github.com/openai/openai-python/compare/v1.51.2...v1.52.0)

### Features

* **api:** add gpt-4o-audio-preview model for chat completions ([#1796](https://github.com/openai/openai-python/issues/1796)) ([fbf1e0c](https://github.com/openai/openai-python/commit/fbf1e0c25c4d163f06b61a43d1a94ce001033a7b))

## 1.51.2 (2024-10-08)

Full Changelog: [v1.51.1...v1.51.2](https://github.com/openai/openai-python/compare/v1.51.1...v1.51.2)
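The gpt-4o-audio-preview feature noted above is exercised through the existing chat completions call. A minimal sketch, assuming an OPENAI_API_KEY is set in the environment; the voice and format values are illustrative choices, not prescribed by this PR:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

completion = client.chat.completions.create(
    model="gpt-4o-audio-preview",
    modalities=["text", "audio"],                # request both text and audio output
    audio={"voice": "alloy", "format": "wav"},   # illustrative voice and container format
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
)

# The assistant message carries the audio as base64 data plus a transcript.
print(completion.choices[0].message.audio.transcript)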
4 changes: 4 additions & 0 deletions api.md
@@ -39,16 +39,20 @@ Types:
from openai.types.chat import (
ChatCompletion,
ChatCompletionAssistantMessageParam,
ChatCompletionAudio,
ChatCompletionAudioParam,
ChatCompletionChunk,
ChatCompletionContentPart,
ChatCompletionContentPartImage,
ChatCompletionContentPartInputAudio,
ChatCompletionContentPartRefusal,
ChatCompletionContentPartText,
ChatCompletionFunctionCallOption,
ChatCompletionFunctionMessageParam,
ChatCompletionMessage,
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionModality,
ChatCompletionNamedToolChoice,
ChatCompletionRole,
ChatCompletionStreamOptions,
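Only the changed portion of the import block is shown above. A small sketch of how the new types might be used for annotations once 1.52.0 is installed; the concrete voice and format literals are assumptions, not taken from this diff:

from typing import List

from openai.types.chat import (
    ChatCompletionAudioParam,  # request-side audio output settings (a TypedDict)
    ChatCompletionModality,    # modality name, e.g. "text" or "audio"
)

# A plain dict satisfies the TypedDict, so it can be passed straight to create().
audio_param: ChatCompletionAudioParam = {"voice": "alloy", "format": "wav"}
modalities: List[ChatCompletionModality] = ["text", "audio"]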
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "1.51.2"
version = "1.52.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
2 changes: 1 addition & 1 deletion src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "1.51.2" # x-release-please-version
__version__ = "1.52.0" # x-release-please-version
18 changes: 18 additions & 0 deletions src/openai/resources/beta/chat/completions.py
@@ -28,7 +28,9 @@
from ....types.chat.chat_completion import ChatCompletion
from ....types.chat.chat_completion_chunk import ChatCompletionChunk
from ....types.chat.parsed_chat_completion import ParsedChatCompletion
from ....types.chat.chat_completion_modality import ChatCompletionModality
from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
@@ -61,6 +63,7 @@ def parse(
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
@@ -70,6 +73,7 @@ def parse(
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -153,6 +157,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma
{
"messages": messages,
"model": model,
"audio": audio,
"frequency_penalty": frequency_penalty,
"function_call": function_call,
"functions": functions,
@@ -161,6 +166,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma
"max_completion_tokens": max_completion_tokens,
"max_tokens": max_tokens,
"metadata": metadata,
"modalities": modalities,
"n": n,
"parallel_tool_calls": parallel_tool_calls,
"presence_penalty": presence_penalty,
@@ -198,6 +204,7 @@ def stream(
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
@@ -207,6 +214,7 @@ def stream(
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -259,6 +267,7 @@ def stream(
self._client.chat.completions.create,
messages=messages,
model=model,
audio=audio,
stream=True,
response_format=_type_to_response_format(response_format),
frequency_penalty=frequency_penalty,
@@ -269,6 +278,7 @@ def stream(
max_completion_tokens=max_completion_tokens,
max_tokens=max_tokens,
metadata=metadata,
modalities=modalities,
n=n,
parallel_tool_calls=parallel_tool_calls,
presence_penalty=presence_penalty,
@@ -320,6 +330,7 @@ async def parse(
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
@@ -329,6 +340,7 @@ async def parse(
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -412,6 +424,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma
{
"messages": messages,
"model": model,
"audio": audio,
"frequency_penalty": frequency_penalty,
"function_call": function_call,
"functions": functions,
@@ -420,6 +433,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma
"max_completion_tokens": max_completion_tokens,
"max_tokens": max_tokens,
"metadata": metadata,
"modalities": modalities,
"n": n,
"parallel_tool_calls": parallel_tool_calls,
"presence_penalty": presence_penalty,
@@ -457,6 +471,7 @@ def stream(
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
@@ -466,6 +481,7 @@ def stream(
max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
modalities: Optional[List[ChatCompletionModality]] | NotGiven = NOT_GIVEN,
n: Optional[int] | NotGiven = NOT_GIVEN,
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
@@ -519,6 +535,7 @@ def stream(
api_request = self._client.chat.completions.create(
messages=messages,
model=model,
audio=audio,
stream=True,
response_format=_type_to_response_format(response_format),
frequency_penalty=frequency_penalty,
@@ -529,6 +546,7 @@ def stream(
max_completion_tokens=max_completion_tokens,
max_tokens=max_tokens,
metadata=metadata,
modalities=modalities,
n=n,
parallel_tool_calls=parallel_tool_calls,
presence_penalty=presence_penalty,
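Taken together, these hunks thread the new audio and modalities arguments through the beta parse() and stream() helpers into chat.completions.create. A rough usage sketch for the streaming helper, with illustrative values; whether a particular model actually streams audio output is not established by this diff:

from openai import OpenAI

client = OpenAI()

with client.beta.chat.completions.stream(
    model="gpt-4o-audio-preview",
    modalities=["text", "audio"],                 # forwarded unchanged to create()
    audio={"voice": "alloy", "format": "pcm16"},  # illustrative; pcm16 is a streaming-friendly format
    messages=[{"role": "user", "content": "Describe the weather in one sentence."}],
) as stream:
    for event in stream:
        if event.type == "content.delta":
            print(event.delta, end="", flush=True)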