Skip to content

Commit

Permalink
feat(api): add timestamp_granularities, add gpt-3.5-turbo-0125 model (#1125)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-bot committed Feb 9, 2024
1 parent d231d1f commit 489dadf
Show file tree
Hide file tree
Showing 5 changed files with 44 additions and 10 deletions.
14 changes: 13 additions & 1 deletion src/openai/resources/audio/transcriptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

from __future__ import annotations

from typing import Union, Mapping, cast
from typing import List, Union, Mapping, cast
from typing_extensions import Literal

import httpx
Expand Down Expand Up @@ -39,6 +39,7 @@ def create(
prompt: str | NotGiven = NOT_GIVEN,
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
Expand Down Expand Up @@ -74,6 +75,10 @@ def create(
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these
options: `word`, or `segment`. Note: There is no additional latency for segment
timestamps, but generating word timestamps incurs additional latency.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
Expand All @@ -90,6 +95,7 @@ def create(
"prompt": prompt,
"response_format": response_format,
"temperature": temperature,
"timestamp_granularities": timestamp_granularities,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
Expand Down Expand Up @@ -127,6 +133,7 @@ async def create(
prompt: str | NotGiven = NOT_GIVEN,
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
Expand Down Expand Up @@ -162,6 +169,10 @@ async def create(
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these
options: `word`, or `segment`. Note: There is no additional latency for segment
timestamps, but generating word timestamps incurs additional latency.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
Expand All @@ -178,6 +189,7 @@ async def create(
"prompt": prompt,
"response_format": response_format,
"temperature": temperature,
"timestamp_granularities": timestamp_granularities,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
Expand Down
20 changes: 14 additions & 6 deletions src/openai/resources/chat/completions.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ def create(
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
Expand Down Expand Up @@ -155,7 +156,7 @@ def create(
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
`gpt-3.5-turbo-1106`.
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
Expand Down Expand Up @@ -250,6 +251,7 @@ def create(
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
Expand Down Expand Up @@ -351,7 +353,7 @@ def create(
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
`gpt-3.5-turbo-1106`.
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
Expand Down Expand Up @@ -439,6 +441,7 @@ def create(
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
Expand Down Expand Up @@ -540,7 +543,7 @@ def create(
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
`gpt-3.5-turbo-1106`.
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
Expand Down Expand Up @@ -628,6 +631,7 @@ def create(
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
Expand Down Expand Up @@ -724,6 +728,7 @@ async def create(
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
Expand Down Expand Up @@ -818,7 +823,7 @@ async def create(
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
`gpt-3.5-turbo-1106`.
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
Expand Down Expand Up @@ -913,6 +918,7 @@ async def create(
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
Expand Down Expand Up @@ -1014,7 +1020,7 @@ async def create(
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
`gpt-3.5-turbo-1106`.
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
Expand Down Expand Up @@ -1102,6 +1108,7 @@ async def create(
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
Expand Down Expand Up @@ -1203,7 +1210,7 @@ async def create(
response_format: An object specifying the format that the model must output. Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
`gpt-3.5-turbo-1106`.
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
Expand Down Expand Up @@ -1291,6 +1298,7 @@ async def create(
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
],
Expand Down
15 changes: 13 additions & 2 deletions src/openai/types/audio/transcription_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,11 @@

from __future__ import annotations

from typing import Union
from typing_extensions import Literal, Required, TypedDict
from typing import List, Union
from typing_extensions import Literal, Required, Annotated, TypedDict

from ..._types import FileTypes
from ..._utils import PropertyInfo

__all__ = ["TranscriptionCreateParams"]

Expand Down Expand Up @@ -50,3 +51,13 @@ class TranscriptionCreateParams(TypedDict, total=False):
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
"""

timestamp_granularities: Annotated[
List[Literal["word", "segment"]], PropertyInfo(alias="timestamp_granularities[]")
]
"""The timestamp granularities to populate for this transcription.
Any of these options: `word`, or `segment`. Note: There is no additional latency
for segment timestamps, but generating word timestamps incurs additional
latency.
"""
3 changes: 2 additions & 1 deletion src/openai/types/chat/completion_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
]
Expand Down Expand Up @@ -137,7 +138,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
Compatible with
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
`gpt-3.5-turbo-1106`.
all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
message the model generates is valid JSON.
Expand Down
2 changes: 2 additions & 0 deletions tests/api_resources/audio/test_transcriptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
prompt="string",
response_format="json",
temperature=0,
timestamp_granularities=["word", "segment"],
)
assert_matches_type(Transcription, transcription, path=["response"])

Expand Down Expand Up @@ -84,6 +85,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
prompt="string",
response_format="json",
temperature=0,
timestamp_granularities=["word", "segment"],
)
assert_matches_type(Transcription, transcription, path=["response"])

Expand Down

0 comments on commit 489dadf

Please sign in to comment.