Skip to content

Commit

Permalink
chore(api): update docs (#1212)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-bot committed Mar 13, 2024
1 parent a0caa09 commit 3208335
Show file tree
Hide file tree
Showing 17 changed files with 98 additions and 73 deletions.
18 changes: 6 additions & 12 deletions src/openai/resources/audio/speech.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def create(
input: str,
model: Union[str, Literal["tts-1", "tts-1-hd"]],
voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
response_format: Literal["mp3", "opus", "aac", "flac", "pcm", "wav"] | NotGiven = NOT_GIVEN,
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
speed: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
Expand All @@ -68,11 +68,8 @@ def create(
available in the
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`,
`flac`, `pcm`, and `wav`.
The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz
sample rate, mono channel, and 16-bit depth in signed little-endian format.
response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
`wav`, and `pcm`.
speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
the default.
Expand Down Expand Up @@ -120,7 +117,7 @@ async def create(
input: str,
model: Union[str, Literal["tts-1", "tts-1-hd"]],
voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
response_format: Literal["mp3", "opus", "aac", "flac", "pcm", "wav"] | NotGiven = NOT_GIVEN,
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
speed: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
Expand All @@ -144,11 +141,8 @@ async def create(
available in the
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`,
`flac`, `pcm`, and `wav`.
The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz
sample rate, mono channel, and 16-bit depth in signed little-endian format.
response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
`wav`, and `pcm`.
speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
the default.
Expand Down
22 changes: 14 additions & 8 deletions src/openai/resources/audio/transcriptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,8 @@ def create(
The audio file object (not file name) to transcribe, in one of these formats:
flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. Only `whisper-1` is currently available.
model: ID of the model to use. Only `whisper-1` (which is powered by our open source
Whisper V2 model) is currently available.
language: The language of the input audio. Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
Expand All @@ -80,9 +81,11 @@ def create(
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these
options: `word`, or `segment`. Note: There is no additional latency for segment
timestamps, but generating word timestamps incurs additional latency.
timestamp_granularities: The timestamp granularities to populate for this transcription.
`response_format` must be set to `verbose_json` to use timestamp granularities.
Either or both of these options are supported: `word`, or `segment`. Note: There
is no additional latency for segment timestamps, but generating word timestamps
incurs additional latency.
extra_headers: Send extra headers
Expand Down Expand Up @@ -154,7 +157,8 @@ async def create(
The audio file object (not file name) to transcribe, in one of these formats:
flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. Only `whisper-1` is currently available.
model: ID of the model to use. Only `whisper-1` (which is powered by our open source
Whisper V2 model) is currently available.
language: The language of the input audio. Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will
Expand All @@ -174,9 +178,11 @@ async def create(
[log probability](https://en.wikipedia.org/wiki/Log_probability) to
automatically increase the temperature until certain thresholds are hit.
timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these
options: `word`, or `segment`. Note: There is no additional latency for segment
timestamps, but generating word timestamps incurs additional latency.
timestamp_granularities: The timestamp granularities to populate for this transcription.
`response_format` must be set to `verbose_json` to use timestamp granularities.
Either or both of these options are supported: `word`, or `segment`. Note: There
is no additional latency for segment timestamps, but generating word timestamps
incurs additional latency.
extra_headers: Send extra headers
Expand Down
6 changes: 4 additions & 2 deletions src/openai/resources/audio/translations.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,8 @@ def create(
file: The audio file object (not file name) translate, in one of these formats: flac,
mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. Only `whisper-1` is currently available.
model: ID of the model to use. Only `whisper-1` (which is powered by our open source
Whisper V2 model) is currently available.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
Expand Down Expand Up @@ -138,7 +139,8 @@ async def create(
file: The audio file object (not file name) translate, in one of these formats: flac,
mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
model: ID of the model to use. Only `whisper-1` is currently available.
model: ID of the model to use. Only `whisper-1` (which is powered by our open source
Whisper V2 model) is currently available.
prompt: An optional text to guide the model's style or continue a previous audio
segment. The
Expand Down
36 changes: 18 additions & 18 deletions src/openai/resources/chat/completions.py
Original file line number Diff line number Diff line change
Expand Up @@ -208,9 +208,9 @@ def create(
tool. Use this to provide a list of functions the model may generate JSON inputs
for.
top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
at each token position, each with an associated log probability. `logprobs` must
be set to `true` if this parameter is used.
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
Expand Down Expand Up @@ -398,9 +398,9 @@ def create(
tool. Use this to provide a list of functions the model may generate JSON inputs
for.
top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
at each token position, each with an associated log probability. `logprobs` must
be set to `true` if this parameter is used.
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
Expand Down Expand Up @@ -588,9 +588,9 @@ def create(
tool. Use this to provide a list of functions the model may generate JSON inputs
for.
top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
at each token position, each with an associated log probability. `logprobs` must
be set to `true` if this parameter is used.
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
Expand Down Expand Up @@ -875,9 +875,9 @@ async def create(
tool. Use this to provide a list of functions the model may generate JSON inputs
for.
top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
at each token position, each with an associated log probability. `logprobs` must
be set to `true` if this parameter is used.
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
Expand Down Expand Up @@ -1065,9 +1065,9 @@ async def create(
tool. Use this to provide a list of functions the model may generate JSON inputs
for.
top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
at each token position, each with an associated log probability. `logprobs` must
be set to `true` if this parameter is used.
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
Expand Down Expand Up @@ -1255,9 +1255,9 @@ async def create(
tool. Use this to provide a list of functions the model may generate JSON inputs
for.
top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return
at each token position, each with an associated log probability. `logprobs` must
be set to `true` if this parameter is used.
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
Expand Down
18 changes: 12 additions & 6 deletions src/openai/resources/images.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,8 @@ def create_variation(
`n=1` is supported.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`.
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated.
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
`1024x1024`.
Expand Down Expand Up @@ -151,7 +152,8 @@ def edit(
n: The number of images to generate. Must be between 1 and 10.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`.
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated.
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
`1024x1024`.
Expand Down Expand Up @@ -231,7 +233,8 @@ def generate(
for `dall-e-3`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`.
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated.
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
`1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
Expand Down Expand Up @@ -315,7 +318,8 @@ async def create_variation(
`n=1` is supported.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`.
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated.
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
`1024x1024`.
Expand Down Expand Up @@ -396,7 +400,8 @@ async def edit(
n: The number of images to generate. Must be between 1 and 10.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`.
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated.
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
`1024x1024`.
Expand Down Expand Up @@ -476,7 +481,8 @@ async def generate(
for `dall-e-3`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`.
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated.
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
`1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
Expand Down
4 changes: 2 additions & 2 deletions src/openai/resources/moderations.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ModerationCreateResponse:
"""
Classifies if text violates OpenAI's Content Policy
Classifies if text is potentially harmful.
Args:
input: The input text to classify
Expand Down Expand Up @@ -106,7 +106,7 @@ async def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> ModerationCreateResponse:
"""
Classifies if text violates OpenAI's Content Policy
Classifies if text is potentially harmful.
Args:
input: The input text to classify
Expand Down
9 changes: 3 additions & 6 deletions src/openai/types/audio/speech_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,13 +26,10 @@ class SpeechCreateParams(TypedDict, total=False):
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
"""

response_format: Literal["mp3", "opus", "aac", "flac", "pcm", "wav"]
"""The format to return audio in.
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
"""The format to audio in.
Supported formats are `mp3`, `opus`, `aac`, `flac`, `pcm`, and `wav`.
The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz
sample rate, mono channel, and 16-bit depth in signed little-endian format.
Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
"""

speed: float
Expand Down
1 change: 1 addition & 0 deletions src/openai/types/audio/transcription.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,4 @@

class Transcription(BaseModel):
text: str
"""The transcribed text."""
13 changes: 9 additions & 4 deletions src/openai/types/audio/transcription_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,11 @@ class TranscriptionCreateParams(TypedDict, total=False):
"""

model: Required[Union[str, Literal["whisper-1"]]]
"""ID of the model to use. Only `whisper-1` is currently available."""
"""ID of the model to use.
Only `whisper-1` (which is powered by our open source Whisper V2 model) is
currently available.
"""

language: str
"""The language of the input audio.
Expand Down Expand Up @@ -54,7 +58,8 @@ class TranscriptionCreateParams(TypedDict, total=False):
timestamp_granularities: List[Literal["word", "segment"]]
"""The timestamp granularities to populate for this transcription.
Any of these options: `word`, or `segment`. Note: There is no additional latency
for segment timestamps, but generating word timestamps incurs additional
latency.
`response_format` must be set to `verbose_json` to use timestamp granularities.
Either or both of these options are supported: `word`, or `segment`. Note: There
is no additional latency for segment timestamps, but generating word timestamps
incurs additional latency.
"""
6 changes: 5 additions & 1 deletion src/openai/types/audio/translation_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,11 @@ class TranslationCreateParams(TypedDict, total=False):
"""

model: Required[Union[str, Literal["whisper-1"]]]
"""ID of the model to use. Only `whisper-1` is currently available."""
"""ID of the model to use.
Only `whisper-1` (which is powered by our open source Whisper V2 model) is
currently available.
"""

prompt: str
"""An optional text to guide the model's style or continue a previous audio
Expand Down
4 changes: 2 additions & 2 deletions src/openai/types/beta/threads/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,8 @@


class LastError(BaseModel):
code: Literal["server_error", "rate_limit_exceeded"]
"""One of `server_error` or `rate_limit_exceeded`."""
code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"]
"""One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`."""

message: str
"""A human-readable description of the error."""
Expand Down
14 changes: 12 additions & 2 deletions src/openai/types/chat/chat_completion_token_logprob.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,12 @@ class TopLogprob(BaseModel):
"""

logprob: float
"""The log probability of this token."""
"""The log probability of this token, if it is within the top 20 most likely
tokens.
Otherwise, the value `-9999.0` is used to signify that the token is very
unlikely.
"""


class ChatCompletionTokenLogprob(BaseModel):
Expand All @@ -36,7 +41,12 @@ class ChatCompletionTokenLogprob(BaseModel):
"""

logprob: float
"""The log probability of this token."""
"""The log probability of this token, if it is within the top 20 most likely
tokens.
Otherwise, the value `-9999.0` is used to signify that the token is very
unlikely.
"""

top_logprobs: List[TopLogprob]
"""List of the most likely tokens and their log probability, at this token
Expand Down
6 changes: 3 additions & 3 deletions src/openai/types/chat/completion_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,9 +195,9 @@ class CompletionCreateParamsBase(TypedDict, total=False):

top_logprobs: Optional[int]
"""
An integer between 0 and 5 specifying the number of most likely tokens to return
at each token position, each with an associated log probability. `logprobs` must
be set to `true` if this parameter is used.
An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
"""

top_p: Optional[float]
Expand Down

0 comments on commit 3208335

Please sign in to comment.