diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ebfa5cee..aea1868f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -2,10 +2,10 @@ name: CI
 on:
   push:
     branches:
-      - main
+      - stainless
   pull_request:
     branches:
-      - main
+      - stainless
 
 jobs:
   lint:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a3b8a298..ced1d0d7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,19 +1,21 @@
 # Changelog
 
-## 0.4.0 (2024-02-14)
+## 0.4.0 (2024-02-15)
 
 Full Changelog: [v0.1.0...v0.4.0](https://github.com/groq/groq-python/compare/v0.1.0...v0.4.0)
 
 ### Features
 
+* Add initial Stainless SDK ([d5a8512](https://github.com/groq/groq-python/commit/d5a851262e04e625dde130367ed91d8f95683599))
 * Add initial Stainless SDK ([316de2c](https://github.com/groq/groq-python/commit/316de2ccfeb76e36fe34bb8656ea90a8d42a7d00))
 * create default branch ([7e00266](https://github.com/groq/groq-python/commit/7e00266e3c691d92d508e753e2c14c03297c09f9))
-* update via SDK Studio ([#3](https://github.com/groq/groq-python/issues/3)) ([2241036](https://github.com/groq/groq-python/commit/2241036e9dbee6629ad7ebce5e6f4f5e5f1028ce))
+* update via SDK Studio ([#3](https://github.com/groq/groq-python/issues/3)) ([8d92c08](https://github.com/groq/groq-python/commit/8d92c086e320c2715e02bc79807ff872e84c0b0f))
 
 
 ### Chores
 
-* go live ([#2](https://github.com/groq/groq-python/issues/2)) ([13665ad](https://github.com/groq/groq-python/commit/13665ad76705513d99cbaa497ccccc694932f2c3))
+* go live ([#2](https://github.com/groq/groq-python/issues/2)) ([ba81c42](https://github.com/groq/groq-python/commit/ba81c42d6d0fd6d47819e0d58962235cb70ca4f1))
+* go live ([#5](https://github.com/groq/groq-python/issues/5)) ([af9a838](https://github.com/groq/groq-python/commit/af9a838e240bb0f7385bc33fb18ce246427ca2f7))
 
 ## 0.1.0 (2024-02-10)
diff --git a/README.md b/README.md
index 0c14ce0e..5c0abddb 100644
--- a/README.md
+++ b/README.md
@@ -261,9 +261,9 @@ completion = response.parse()  # get the object that `chat.completions.create()`
 print(completion.id)
 ```
 
-These methods return an [`APIResponse`](https://github.com/groq/groq-python/tree/main/src/groq/_response.py) object.
+These methods return an [`APIResponse`](https://github.com/groq/groq-python/tree/stainless/src/groq/_response.py) object.
 
-The async client returns an [`AsyncAPIResponse`](https://github.com/groq/groq-python/tree/main/src/groq/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
+The async client returns an [`AsyncAPIResponse`](https://github.com/groq/groq-python/tree/stainless/src/groq/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
 
 #### `.with_streaming_response`
diff --git a/bin/check-release-environment b/bin/check-release-environment
index 29306d89..e35a3711 100644
--- a/bin/check-release-environment
+++ b/bin/check-release-environment
@@ -6,9 +6,9 @@ if [ -z "${PYPI_TOKEN}" ]; then
  errors+=("The GROQ_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.")
fi
 
-len=${#errors[@]}
+lenErrors=${#errors[@]}
 
-if [[ len -gt 0 ]]; then
+if [[ lenErrors -gt 0 ]]; then
   echo -e "Found the following errors in the release environment:\n"
 
   for error in "${errors[@]}"; do
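The README hunk above only re-points two links from `main` to `stainless`, but the raw-response feature those docs describe is easy to miss in diff form. A minimal sketch of that flow, assuming a `Groq` client configured via the `GROQ_API_KEY` environment variable; the model name and prompt are illustrative and do not come from this diff:

```python
from groq import Groq

client = Groq()  # reads GROQ_API_KEY from the environment

# `.with_raw_response` exposes status and headers alongside the parsed body.
response = client.chat.completions.with_raw_response.create(
    messages=[{"role": "user", "content": "Explain HTTP caching in one line."}],
    model="mixtral-8x7b-32768",  # illustrative model name
)
completion = response.parse()  # the ChatCompletion that `create()` would have returned
print(completion.id)
```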
diff --git a/examples/chat_completion_async.py b/examples/chat_completion_async.py
index 99b8fc4d..0d485a26 100644
--- a/examples/chat_completion_async.py
+++ b/examples/chat_completion_async.py
@@ -3,7 +3,7 @@
 from groq import AsyncGroq
 
 
-async def main():
+async def main() -> None:
     client = AsyncGroq()
 
     chat_completion = await client.chat.completions.create(
diff --git a/examples/chat_completion_async_streaming.py b/examples/chat_completion_async_streaming.py
index 39105f99..3ba5edd3 100644
--- a/examples/chat_completion_async_streaming.py
+++ b/examples/chat_completion_async_streaming.py
@@ -3,7 +3,7 @@
 from groq import AsyncGroq
 
 
-async def main():
+async def main() -> None:
     client = AsyncGroq()
 
     stream = await client.chat.completions.create(
diff --git a/src/groq/lib/chat_completion_chunk.py b/src/groq/lib/chat_completion_chunk.py
index e4e3533b..71c433f4 100644
--- a/src/groq/lib/chat_completion_chunk.py
+++ b/src/groq/lib/chat_completion_chunk.py
@@ -16,6 +16,8 @@
     "ChoiceDeltaToolCall",
     "ChoiceDeltaToolCallFunction",
 ]
+
+
 class ChoiceDeltaFunctionCall(BaseModel):
     arguments: Optional[str] = None
     """
@@ -28,6 +30,7 @@ class ChoiceDeltaFunctionCall(BaseModel):
     name: Optional[str] = None
     """The name of the function to call."""
 
+
 class ChoiceLogprobsContentTopLogprob(BaseModel):
     token: Optional[str] = None
 
@@ -67,11 +70,12 @@ class ChoiceDeltaToolCall(BaseModel):
 
 
 class ChoiceDelta(BaseModel):
-    content: Optional[str] = None
+    content: str
+
+    role: str
 
     function_call: Optional[ChoiceDeltaFunctionCall] = None
 
-    role: Optional[str] = None
-
     tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
 
 
@@ -79,22 +83,22 @@ class ChoiceDelta(BaseModel):
 class Choice(BaseModel):
     delta: ChoiceDelta
 
-    finish_reason: Optional[str] = None
+    finish_reason: str
 
-    index: Optional[int] = None
+    index: int
 
-    logprobs: Optional[ChoiceLogprobs] = None
+    logprobs: ChoiceLogprobs
 
 
 class ChatCompletionChunk(BaseModel):
-    id: Optional[str] = None
+    id: str
 
-    choices: Optional[List[Choice]] = None
+    choices: List[Choice]
 
-    created: Optional[int] = None
+    created: int
 
-    model: Optional[str] = None
+    model: str
 
-    object: Optional[str] = None
+    object: str
 
-    system_fingerprint: Optional[str] = None
\ No newline at end of file
+    system_fingerprint: str
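Beyond the `-> None` annotations on the example scripts, the chunk model above now types `delta.content` as a required `str`, so a consumer can print deltas without a `None` guard. A sketch of the async streaming loop the two example files build up to; the model name, prompt, and `stream=True` flag are illustrative (the diff truncates the `create()` signature before the streaming parameter):

```python
import asyncio

from groq import AsyncGroq


async def main() -> None:
    client = AsyncGroq()
    stream = await client.chat.completions.create(
        messages=[{"role": "user", "content": "Count to three."}],
        model="mixtral-8x7b-32768",  # illustrative
        stream=True,
    )
    # Each item is a ChatCompletionChunk; under the stricter model,
    # `delta.content` is a plain str rather than Optional[str].
    async for chunk in stream:
        print(chunk.choices[0].delta.content, end="")


asyncio.run(main())
```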
diff --git a/src/groq/resources/chat/completions.py b/src/groq/resources/chat/completions.py
index 67b7ab81..017591e2 100644
--- a/src/groq/resources/chat/completions.py
+++ b/src/groq/resources/chat/completions.py
@@ -2,7 +2,8 @@
 
 from __future__ import annotations
 
-from typing import Dict, List, Union, Literal, Iterable, Optional, overload
+from typing import Dict, List, Union, Iterable, Optional, overload
+from typing_extensions import Literal
 
 import httpx
 
@@ -39,12 +40,12 @@ def with_streaming_response(self) -> CompletionsWithStreamingResponse:
     def create(
         self,
         *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
         frequency_penalty: float | NotGiven = NOT_GIVEN,
         logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
-        messages: Iterable[completion_create_params.Message] | NotGiven = NOT_GIVEN,
-        model: str | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
         presence_penalty: float | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
@@ -70,12 +71,12 @@ def create(
     def create(
         self,
         *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
         frequency_penalty: float | NotGiven = NOT_GIVEN,
         logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
-        messages: Iterable[completion_create_params.Message] | NotGiven = NOT_GIVEN,
-        model: str | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
         presence_penalty: float | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
@@ -101,12 +102,12 @@ def create(
     def create(
         self,
         *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
         frequency_penalty: float | NotGiven = NOT_GIVEN,
         logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
-        messages: Iterable[completion_create_params.Message] | NotGiven = NOT_GIVEN,
-        model: str | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
         presence_penalty: float | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
@@ -131,12 +132,12 @@ def create(
     def create(
         self,
         *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
         frequency_penalty: float | NotGiven = NOT_GIVEN,
         logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
-        messages: Iterable[completion_create_params.Message] | NotGiven = NOT_GIVEN,
-        model: str | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
         presence_penalty: float | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
@@ -175,12 +176,12 @@ def create(
             "/openai/v1/chat/completions",
             body=maybe_transform(
                 {
+                    "messages": messages,
+                    "model": model,
                     "frequency_penalty": frequency_penalty,
                     "logit_bias": logit_bias,
                     "logprobs": logprobs,
                     "max_tokens": max_tokens,
-                    "messages": messages,
-                    "model": model,
                     "n": n,
                     "presence_penalty": presence_penalty,
                     "response_format": response_format,
@@ -218,12 +219,12 @@ def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
     async def create(
         self,
         *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
         frequency_penalty: float | NotGiven = NOT_GIVEN,
         logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
-        messages: Iterable[completion_create_params.Message] | NotGiven = NOT_GIVEN,
-        model: str | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
         presence_penalty: float | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
@@ -249,12 +250,12 @@ async def create(
     async def create(
         self,
         *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
         frequency_penalty: float | NotGiven = NOT_GIVEN,
         logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
-        messages: Iterable[completion_create_params.Message] | NotGiven = NOT_GIVEN,
-        model: str | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
         presence_penalty: float | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
@@ -280,12 +281,12 @@ async def create(
     async def create(
         self,
         *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
         frequency_penalty: float | NotGiven = NOT_GIVEN,
         logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
-        messages: Iterable[completion_create_params.Message] | NotGiven = NOT_GIVEN,
-        model: str | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
         presence_penalty: float | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
@@ -310,12 +311,12 @@ async def create(
     async def create(
         self,
         *,
+        messages: Iterable[completion_create_params.Message],
+        model: str,
         frequency_penalty: float | NotGiven = NOT_GIVEN,
         logit_bias: Dict[str, int] | NotGiven = NOT_GIVEN,
         logprobs: bool | NotGiven = NOT_GIVEN,
         max_tokens: int | NotGiven = NOT_GIVEN,
-        messages: Iterable[completion_create_params.Message] | NotGiven = NOT_GIVEN,
-        model: str | NotGiven = NOT_GIVEN,
         n: int | NotGiven = NOT_GIVEN,
         presence_penalty: float | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
@@ -354,12 +355,12 @@ async def create(
             "/openai/v1/chat/completions",
             body=maybe_transform(
                 {
+                    "messages": messages,
+                    "model": model,
                     "frequency_penalty": frequency_penalty,
                     "logit_bias": logit_bias,
                     "logprobs": logprobs,
                     "max_tokens": max_tokens,
-                    "messages": messages,
-                    "model": model,
                     "n": n,
                     "presence_penalty": presence_penalty,
                     "response_format": response_format,
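Every overload above moves `messages` and `model` to the front of the keyword list and drops their `NotGiven` defaults, so omitting either now fails at the call site (and under a type checker) instead of reaching the API. A sketch of a call against the tightened signature; the model name and prompt are illustrative:

```python
from groq import Groq

client = Groq()

completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "Say hello."}],  # now required
    model="mixtral-8x7b-32768",                            # now required
    max_tokens=32,  # optional params keep their NOT_GIVEN defaults
)
print(completion.choices[0].message.content)
```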
diff --git a/src/groq/types/chat/chat_completion.py b/src/groq/types/chat/chat_completion.py
index 77365e4a..1ff1f5d5 100644
--- a/src/groq/types/chat/chat_completion.py
+++ b/src/groq/types/chat/chat_completion.py
@@ -54,21 +54,21 @@ class ChoiceMessageToolCall(BaseModel):
 
 
 class ChoiceMessage(BaseModel):
-    content: Optional[str] = None
+    content: str
 
-    role: Optional[str] = None
+    role: str
 
     tool_calls: Optional[List[ChoiceMessageToolCall]] = None
 
 
 class Choice(BaseModel):
-    finish_reason: Optional[str] = None
+    finish_reason: str
 
-    index: Optional[int] = None
+    index: int
 
-    logprobs: Optional[ChoiceLogprobs] = None
+    logprobs: ChoiceLogprobs
 
-    message: Optional[ChoiceMessage] = None
+    message: ChoiceMessage
 
 
 class Usage(BaseModel):
@@ -86,9 +86,9 @@ class Usage(BaseModel):
 
 
 class ChatCompletion(BaseModel):
-    id: Optional[str] = None
+    choices: List[Choice]
 
-    choices: Optional[List[Choice]] = None
+    id: Optional[str] = None
 
     created: Optional[int] = None
diff --git a/src/groq/types/chat/completion_create_params.py b/src/groq/types/chat/completion_create_params.py
index 1aea7838..0f9712bc 100644
--- a/src/groq/types/chat/completion_create_params.py
+++ b/src/groq/types/chat/completion_create_params.py
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 from typing import Dict, List, Union, Iterable, Optional
-from typing_extensions import Annotated, TypedDict
+from typing_extensions import Required, Annotated, TypedDict
 
 from ..._utils import PropertyInfo
 
@@ -22,6 +22,10 @@
 
 
 class CompletionCreateParams(TypedDict, total=False):
+    messages: Required[Iterable[Message]]
+
+    model: Required[str]
+
     frequency_penalty: float
 
     logit_bias: Dict[str, int]
@@ -30,10 +34,6 @@ class CompletionCreateParams(TypedDict, total=False):
 
     max_tokens: int
 
-    messages: Iterable[Message]
-
-    model: str
-
     n: int
 
     presence_penalty: float
@@ -78,11 +78,11 @@ class MessageToolCall(TypedDict, total=False):
 
 
 class Message(TypedDict, total=False):
-    content: str
+    content: Required[str]
 
-    name: str
+    role: Required[str]
 
-    role: str
+    name: str
 
     tool_call_id: str
     """ToolMessage Fields"""
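The `Required[...]` marker from `typing_extensions` is what makes this change possible inside a `total=False` TypedDict: every key defaults to optional, and `Required` opts individual keys back in. A trimmed sketch of the mechanics; the class and values below are stand-ins, not the real `CompletionCreateParams`:

```python
from typing_extensions import Required, TypedDict


class ParamsSketch(TypedDict, total=False):
    messages: Required[list]  # must be present despite total=False
    model: Required[str]      # must be present despite total=False
    max_tokens: int           # may be omitted


ok: ParamsSketch = {"messages": [], "model": "some-model"}
# bad: ParamsSketch = {"messages": []}  # a type checker rejects the missing "model"
```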
Fields""" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 1eabfe70..1fdfc34b 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -19,16 +19,28 @@ class TestCompletions: @parametrize def test_method_create(self, client: Groq) -> None: - completion = client.chat.completions.create() + completion = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "string", + }, + { + "content": "string", + "role": "string", + }, + { + "content": "string", + "role": "string", + }, + ], + model="string", + ) assert_matches_type(ChatCompletion, completion, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: Groq) -> None: completion = client.chat.completions.create( - frequency_penalty=0, - logit_bias={"foo": 0}, - logprobs=True, - max_tokens=0, messages=[ { "content": "string", @@ -128,6 +140,10 @@ def test_method_create_with_all_params(self, client: Groq) -> None: }, ], model="string", + frequency_penalty=0, + logit_bias={"foo": 0}, + logprobs=True, + max_tokens=0, n=0, presence_penalty=0, response_format={"type": "string"}, @@ -176,7 +192,23 @@ def test_method_create_with_all_params(self, client: Groq) -> None: @parametrize def test_raw_response_create(self, client: Groq) -> None: - response = client.chat.completions.with_raw_response.create() + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "string", + }, + { + "content": "string", + "role": "string", + }, + { + "content": "string", + "role": "string", + }, + ], + model="string", + ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -185,7 +217,23 @@ def test_raw_response_create(self, client: Groq) -> None: @parametrize def test_streaming_response_create(self, client: Groq) -> None: - with client.chat.completions.with_streaming_response.create() as response: + with client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "string", + }, + { + "content": "string", + "role": "string", + }, + { + "content": "string", + "role": "string", + }, + ], + model="string", + ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -200,16 +248,28 @@ class TestAsyncCompletions: @parametrize async def test_method_create(self, async_client: AsyncGroq) -> None: - completion = await async_client.chat.completions.create() + completion = await async_client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "string", + }, + { + "content": "string", + "role": "string", + }, + { + "content": "string", + "role": "string", + }, + ], + model="string", + ) assert_matches_type(ChatCompletion, completion, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> None: completion = await async_client.chat.completions.create( - frequency_penalty=0, - logit_bias={"foo": 0}, - logprobs=True, - max_tokens=0, messages=[ { "content": "string", @@ -309,6 +369,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N }, ], model="string", + frequency_penalty=0, + logit_bias={"foo": 0}, + logprobs=True, + max_tokens=0, n=0, presence_penalty=0, response_format={"type": "string"}, @@ -357,7 +421,23 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) 
@@ -200,16 +248,28 @@ class TestAsyncCompletions:
 
     @parametrize
     async def test_method_create(self, async_client: AsyncGroq) -> None:
-        completion = await async_client.chat.completions.create()
+        completion = await async_client.chat.completions.create(
+            messages=[
+                {
+                    "content": "string",
+                    "role": "string",
+                },
+                {
+                    "content": "string",
+                    "role": "string",
+                },
+                {
+                    "content": "string",
+                    "role": "string",
+                },
+            ],
+            model="string",
+        )
         assert_matches_type(ChatCompletion, completion, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> None:
         completion = await async_client.chat.completions.create(
-            frequency_penalty=0,
-            logit_bias={"foo": 0},
-            logprobs=True,
-            max_tokens=0,
             messages=[
                 {
                     "content": "string",
@@ -309,6 +369,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N
                 },
             ],
             model="string",
+            frequency_penalty=0,
+            logit_bias={"foo": 0},
+            logprobs=True,
+            max_tokens=0,
             n=0,
             presence_penalty=0,
             response_format={"type": "string"},
@@ -357,7 +421,23 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N
 
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGroq) -> None:
-        response = await async_client.chat.completions.with_raw_response.create()
+        response = await async_client.chat.completions.with_raw_response.create(
+            messages=[
+                {
+                    "content": "string",
+                    "role": "string",
+                },
+                {
+                    "content": "string",
+                    "role": "string",
+                },
+                {
+                    "content": "string",
+                    "role": "string",
+                },
+            ],
+            model="string",
+        )
 
         assert response.is_closed is True
         assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -366,7 +446,23 @@ async def test_raw_response_create(self, async_client: AsyncGroq) -> None:
 
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGroq) -> None:
-        async with async_client.chat.completions.with_streaming_response.create() as response:
+        async with async_client.chat.completions.with_streaming_response.create(
+            messages=[
+                {
+                    "content": "string",
+                    "role": "string",
+                },
+                {
+                    "content": "string",
+                    "role": "string",
+                },
+                {
+                    "content": "string",
+                    "role": "string",
+                },
+            ],
+            model="string",
+        ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
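The async tests mirror the sync ones one-for-one. Per the README note earlier in this diff, the async client's response-reading methods are `await`able; a sketch of the async raw-response flow under that assumption, with illustrative message content and model name:

```python
import asyncio

from groq import AsyncGroq


async def main() -> None:
    client = AsyncGroq()
    response = await client.chat.completions.with_raw_response.create(
        messages=[{"role": "user", "content": "Ping?"}],
        model="mixtral-8x7b-32768",  # illustrative
    )
    completion = await response.parse()  # assumed awaitable on AsyncAPIResponse
    print(completion.id)


asyncio.run(main())
```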