From 59b290fa39c8d4d8079994b1d04dd814d12b4526 Mon Sep 17 00:00:00 2001 From: stainless-bot Date: Wed, 26 Jun 2024 11:19:35 +0000 Subject: [PATCH] feat(api): update via SDK Studio --- .stats.yml | 2 +- README.md | 2 +- bin/publish-pypi | 3 + src/groq/_base_client.py | 17 +- src/groq/_utils/__init__.py | 1 + src/groq/_utils/_reflection.py | 8 + src/groq/_utils/_sync.py | 19 +- src/groq/resources/audio/transcriptions.py | 208 +++++++++++++++++- .../audio/transcription_create_params.py | 103 ++++++++- ...chat_completion_assistant_message_param.py | 7 +- .../chat_completion_function_message_param.py | 3 + .../chat_completion_system_message_param.py | 4 + .../chat_completion_tool_message_param.py | 3 + .../chat_completion_user_message_param.py | 5 +- .../audio/test_transcriptions.py | 4 +- tests/api_resources/chat/test_completions.py | 2 + 16 files changed, 376 insertions(+), 15 deletions(-) create mode 100644 src/groq/_utils/_reflection.py diff --git a/.stats.yml b/.stats.yml index 1f378123..86a0c791 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 7 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-a4c1214ecaa24ad37fbb3c12b8392787ebe0fd51c5e7e08bdf25d7608dc7900b.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-d9cb66d87629b96a931ce71a189b1482b6ef980aec670e03ffaf75efde6f306a.yml diff --git a/README.md b/README.md index a8b47a58..a352523f 100644 --- a/README.md +++ b/README.md @@ -337,7 +337,7 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c - Support for proxies - Custom transports -- Additional [advanced](https://www.python-httpx.org/advanced/#client-instances) functionality +- Additional [advanced](https://www.python-httpx.org/advanced/clients/) functionality ```python from groq import Groq, DefaultHttpxClient diff --git a/bin/publish-pypi b/bin/publish-pypi index 826054e9..05bfccbb 100644 --- 
a/bin/publish-pypi +++ b/bin/publish-pypi @@ -3,4 +3,7 @@ set -eux mkdir -p dist rye build --clean +# Patching importlib-metadata version until upstream library version is updated +# https://github.com/pypa/twine/issues/977#issuecomment-2189800841 +"$HOME/.rye/self/bin/python3" -m pip install 'importlib-metadata==7.2.1' rye publish --yes --token=$PYPI_TOKEN diff --git a/src/groq/_base_client.py b/src/groq/_base_client.py index d6edf383..51b73a49 100644 --- a/src/groq/_base_client.py +++ b/src/groq/_base_client.py @@ -60,7 +60,7 @@ RequestOptions, ModelBuilderProtocol, ) -from ._utils import is_dict, is_list, is_given, lru_cache, is_mapping +from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping from ._compat import model_copy, model_dump from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type from ._response import ( @@ -358,6 +358,7 @@ def __init__( self._custom_query = custom_query or {} self._strict_response_validation = _strict_response_validation self._idempotency_header = None + self._platform: Platform | None = None if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] raise TypeError( @@ -622,7 +623,10 @@ def base_url(self, url: URL | str) -> None: self._base_url = self._enforce_trailing_slash(url if isinstance(url, URL) else URL(url)) def platform_headers(self) -> Dict[str, str]: - return platform_headers(self._version) + # the actual implementation is in a separate `lru_cache` decorated + # function because adding `lru_cache` to methods will leak memory + # https://github.com/python/cpython/issues/88476 + return platform_headers(self._version, platform=self._platform) def _parse_retry_after_header(self, response_headers: Optional[httpx.Headers] = None) -> float | None: """Returns a float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified. 
@@ -1498,6 +1502,11 @@ async def _request( stream_cls: type[_AsyncStreamT] | None, remaining_retries: int | None, ) -> ResponseT | _AsyncStreamT: + if self._platform is None: + # `get_platform` can make blocking IO calls so we + # execute it earlier while we are in an async context + self._platform = await asyncify(get_platform)() + cast_to = self._maybe_override_cast_to(cast_to, options) await self._prepare_options(options) @@ -1921,11 +1930,11 @@ def get_platform() -> Platform: @lru_cache(maxsize=None) -def platform_headers(version: str) -> Dict[str, str]: +def platform_headers(version: str, *, platform: Platform | None) -> Dict[str, str]: return { "X-Stainless-Lang": "python", "X-Stainless-Package-Version": version, - "X-Stainless-OS": str(get_platform()), + "X-Stainless-OS": str(platform or get_platform()), "X-Stainless-Arch": str(get_architecture()), "X-Stainless-Runtime": get_python_runtime(), "X-Stainless-Runtime-Version": get_python_version(), diff --git a/src/groq/_utils/__init__.py b/src/groq/_utils/__init__.py index 31b5b227..667e2473 100644 --- a/src/groq/_utils/__init__.py +++ b/src/groq/_utils/__init__.py @@ -49,3 +49,4 @@ maybe_transform as maybe_transform, async_maybe_transform as async_maybe_transform, ) +from ._reflection import function_has_argument as function_has_argument diff --git a/src/groq/_utils/_reflection.py b/src/groq/_utils/_reflection.py new file mode 100644 index 00000000..e134f58e --- /dev/null +++ b/src/groq/_utils/_reflection.py @@ -0,0 +1,8 @@ +import inspect +from typing import Any, Callable + + +def function_has_argument(func: Callable[..., Any], arg_name: str) -> bool: + """Returns whether or not the given function has a specific parameter""" + sig = inspect.signature(func) + return arg_name in sig.parameters diff --git a/src/groq/_utils/_sync.py b/src/groq/_utils/_sync.py index 595924e5..d0d81033 100644 --- a/src/groq/_utils/_sync.py +++ b/src/groq/_utils/_sync.py @@ -7,6 +7,8 @@ import anyio import anyio.to_thread +from 
._reflection import function_has_argument + T_Retval = TypeVar("T_Retval") T_ParamSpec = ParamSpec("T_ParamSpec") @@ -59,6 +61,21 @@ def do_work(arg1, arg2, kwarg1="", kwarg2="") -> str: async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval: partial_f = functools.partial(function, *args, **kwargs) - return await anyio.to_thread.run_sync(partial_f, cancellable=cancellable, limiter=limiter) + + # In `v4.1.0` anyio added the `abandon_on_cancel` argument and deprecated the old + # `cancellable` argument, so we need to use the new `abandon_on_cancel` to avoid + # surfacing deprecation warnings. + if function_has_argument(anyio.to_thread.run_sync, "abandon_on_cancel"): + return await anyio.to_thread.run_sync( + partial_f, + abandon_on_cancel=cancellable, + limiter=limiter, + ) + + return await anyio.to_thread.run_sync( + partial_f, + cancellable=cancellable, + limiter=limiter, + ) return wrapper diff --git a/src/groq/resources/audio/transcriptions.py b/src/groq/resources/audio/transcriptions.py index 8b0de529..3880930a 100644 --- a/src/groq/resources/audio/transcriptions.py +++ b/src/groq/resources/audio/transcriptions.py @@ -45,7 +45,109 @@ def create( *, file: FileTypes, model: Union[str, Literal["whisper-large-v3"]], - language: str | NotGiven = NOT_GIVEN, + language: Literal[ + "en", + "zh", + "de", + "es", + "ru", + "ko", + "fr", + "ja", + "pt", + "tr", + "pl", + "ca", + "nl", + "ar", + "sv", + "it", + "id", + "hi", + "fi", + "vi", + "he", + "uk", + "el", + "ms", + "cs", + "ro", + "da", + "hu", + "ta", + "no", + "th", + "ur", + "hr", + "bg", + "lt", + "la", + "mi", + "ml", + "cy", + "sk", + "te", + "fa", + "lv", + "bn", + "sr", + "az", + "sl", + "kn", + "et", + "mk", + "br", + "eu", + "is", + "hy", + "ne", + "mn", + "bs", + "kk", + "sq", + "sw", + "gl", + "mr", + "pa", + "si", + "km", + "sn", + "yo", + "so", + "af", + "oc", + "ka", + "be", + "tg", + "sd", + "gu", + "am", + "yi", + "lo", + "uz", + "fo", + "ht", + "ps", + "tk", + "nn", + 
"mt", + "sa", + "lb", + "my", + "bo", + "tl", + "mg", + "as", + "tt", + "haw", + "ln", + "ha", + "ba", + "jw", + "su", + "yue", + ] + | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, response_format: Literal["json", "text", "verbose_json"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, @@ -140,7 +242,109 @@ async def create( *, file: FileTypes, model: Union[str, Literal["whisper-large-v3"]], - language: str | NotGiven = NOT_GIVEN, + language: Literal[ + "en", + "zh", + "de", + "es", + "ru", + "ko", + "fr", + "ja", + "pt", + "tr", + "pl", + "ca", + "nl", + "ar", + "sv", + "it", + "id", + "hi", + "fi", + "vi", + "he", + "uk", + "el", + "ms", + "cs", + "ro", + "da", + "hu", + "ta", + "no", + "th", + "ur", + "hr", + "bg", + "lt", + "la", + "mi", + "ml", + "cy", + "sk", + "te", + "fa", + "lv", + "bn", + "sr", + "az", + "sl", + "kn", + "et", + "mk", + "br", + "eu", + "is", + "hy", + "ne", + "mn", + "bs", + "kk", + "sq", + "sw", + "gl", + "mr", + "pa", + "si", + "km", + "sn", + "yo", + "so", + "af", + "oc", + "ka", + "be", + "tg", + "sd", + "gu", + "am", + "yi", + "lo", + "uz", + "fo", + "ht", + "ps", + "tk", + "nn", + "mt", + "sa", + "lb", + "my", + "bo", + "tl", + "mg", + "as", + "tt", + "haw", + "ln", + "ha", + "ba", + "jw", + "su", + "yue", + ] + | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, response_format: Literal["json", "text", "verbose_json"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, diff --git a/src/groq/types/audio/transcription_create_params.py b/src/groq/types/audio/transcription_create_params.py index 2f030e20..ecd0437f 100644 --- a/src/groq/types/audio/transcription_create_params.py +++ b/src/groq/types/audio/transcription_create_params.py @@ -20,7 +20,108 @@ class TranscriptionCreateParams(TypedDict, total=False): model: Required[Union[str, Literal["whisper-large-v3"]]] """ID of the model to use. 
Only `whisper-large-v3` is currently available.""" - language: str + language: Literal[ + "en", + "zh", + "de", + "es", + "ru", + "ko", + "fr", + "ja", + "pt", + "tr", + "pl", + "ca", + "nl", + "ar", + "sv", + "it", + "id", + "hi", + "fi", + "vi", + "he", + "uk", + "el", + "ms", + "cs", + "ro", + "da", + "hu", + "ta", + "no", + "th", + "ur", + "hr", + "bg", + "lt", + "la", + "mi", + "ml", + "cy", + "sk", + "te", + "fa", + "lv", + "bn", + "sr", + "az", + "sl", + "kn", + "et", + "mk", + "br", + "eu", + "is", + "hy", + "ne", + "mn", + "bs", + "kk", + "sq", + "sw", + "gl", + "mr", + "pa", + "si", + "km", + "sn", + "yo", + "so", + "af", + "oc", + "ka", + "be", + "tg", + "sd", + "gu", + "am", + "yi", + "lo", + "uz", + "fo", + "ht", + "ps", + "tk", + "nn", + "mt", + "sa", + "lb", + "my", + "bo", + "tl", + "mg", + "as", + "tt", + "haw", + "ln", + "ha", + "ba", + "jw", + "su", + "yue", + ] """The language of the input audio. Supplying the input language in diff --git a/src/groq/types/chat/chat_completion_assistant_message_param.py b/src/groq/types/chat/chat_completion_assistant_message_param.py index e1e39948..39a11537 100644 --- a/src/groq/types/chat/chat_completion_assistant_message_param.py +++ b/src/groq/types/chat/chat_completion_assistant_message_param.py @@ -11,7 +11,7 @@ class FunctionCall(TypedDict, total=False): - arguments: Required[str] + arguments: str """ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may @@ -19,7 +19,7 @@ class FunctionCall(TypedDict, total=False): arguments in your code before calling your function. """ - name: Required[str] + name: str """The name of the function to call.""" @@ -47,5 +47,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): role. """ + tool_call_id: Optional[str] + """DO NOT USE. 
This field is present because OpenAI allows it and users send it.""" + tool_calls: Iterable[ChatCompletionMessageToolCallParam] """The tool calls generated by the model, such as function calls.""" diff --git a/src/groq/types/chat/chat_completion_function_message_param.py b/src/groq/types/chat/chat_completion_function_message_param.py index 5af12bf9..9c8bb6ff 100644 --- a/src/groq/types/chat/chat_completion_function_message_param.py +++ b/src/groq/types/chat/chat_completion_function_message_param.py @@ -17,3 +17,6 @@ class ChatCompletionFunctionMessageParam(TypedDict, total=False): role: Required[Literal["function"]] """The role of the messages author, in this case `function`.""" + + tool_call_id: Optional[str] + """DO NOT USE. This field is present because OpenAI allows it and users send it.""" diff --git a/src/groq/types/chat/chat_completion_system_message_param.py b/src/groq/types/chat/chat_completion_system_message_param.py index 94bb3f63..7cf2184f 100644 --- a/src/groq/types/chat/chat_completion_system_message_param.py +++ b/src/groq/types/chat/chat_completion_system_message_param.py @@ -2,6 +2,7 @@ from __future__ import annotations +from typing import Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["ChatCompletionSystemMessageParam"] @@ -20,3 +21,6 @@ class ChatCompletionSystemMessageParam(TypedDict, total=False): Provides the model information to differentiate between participants of the same role. """ + + tool_call_id: Optional[str] + """DO NOT USE. 
This field is present because OpenAI allows it and users send it.""" diff --git a/src/groq/types/chat/chat_completion_tool_message_param.py b/src/groq/types/chat/chat_completion_tool_message_param.py index 5c590e03..3b6b49aa 100644 --- a/src/groq/types/chat/chat_completion_tool_message_param.py +++ b/src/groq/types/chat/chat_completion_tool_message_param.py @@ -16,3 +16,6 @@ class ChatCompletionToolMessageParam(TypedDict, total=False): tool_call_id: Required[str] """Tool call that this message is responding to.""" + + name: str + """DO NOT USE. This field is present because OpenAI allows it and users send it.""" diff --git a/src/groq/types/chat/chat_completion_user_message_param.py b/src/groq/types/chat/chat_completion_user_message_param.py index 5c15322a..9c9f5b83 100644 --- a/src/groq/types/chat/chat_completion_user_message_param.py +++ b/src/groq/types/chat/chat_completion_user_message_param.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Union, Iterable +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from .chat_completion_content_part_param import ChatCompletionContentPartParam @@ -23,3 +23,6 @@ class ChatCompletionUserMessageParam(TypedDict, total=False): Provides the model information to differentiate between participants of the same role. """ + + tool_call_id: Optional[str] + """DO NOT USE. 
This field is present because OpenAI allows it and users send it.""" diff --git a/tests/api_resources/audio/test_transcriptions.py b/tests/api_resources/audio/test_transcriptions.py index b54784b9..8744a2a8 100644 --- a/tests/api_resources/audio/test_transcriptions.py +++ b/tests/api_resources/audio/test_transcriptions.py @@ -30,7 +30,7 @@ def test_method_create_with_all_params(self, client: Groq) -> None: transcription = client.audio.transcriptions.create( file=b"raw file contents", model="whisper-large-v3", - language="string", + language="en", prompt="string", response_format="json", temperature=0, @@ -81,7 +81,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N transcription = await async_client.audio.transcriptions.create( file=b"raw file contents", model="whisper-large-v3", - language="string", + language="en", prompt="string", response_format="json", temperature=0, diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 62f61457..a3f1ead1 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -38,6 +38,7 @@ def test_method_create_with_all_params(self, client: Groq) -> None: "content": "string", "role": "system", "name": "string", + "tool_call_id": "string", } ], model="string", @@ -165,6 +166,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N "content": "string", "role": "system", "name": "string", + "tool_call_id": "string", } ], model="string",