Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ from arcadepy import Arcade
client = Arcade()

try:
client.chat.completions.completions(
client.chat.completions.create(
messages=[
{
"role": "user",
Expand Down Expand Up @@ -154,7 +154,7 @@ client = Arcade(
)

# Or, configure per-request:
client.with_options(max_retries=5).chat.completions.completions(
client.with_options(max_retries=5).chat.completions.create(
messages=[
{
"role": "user",
Expand Down Expand Up @@ -184,7 +184,7 @@ client = Arcade(
)

# Override per-request:
client.with_options(timeout=5.0).chat.completions.completions(
client.with_options(timeout=5.0).chat.completions.create(
messages=[
{
"role": "user",
Expand Down Expand Up @@ -230,15 +230,15 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
from arcadepy import Arcade

client = Arcade()
response = client.chat.completions.with_raw_response.completions(
response = client.chat.completions.with_raw_response.create(
messages=[{
"role": "user",
"content": "Hello, how can I use Arcade AI?",
}],
)
print(response.headers.get('X-My-Header'))

completion = response.parse() # get the object that `chat.completions.completions()` would have returned
completion = response.parse() # get the object that `chat.completions.create()` would have returned
print(completion.id)
```

Expand All @@ -253,7 +253,7 @@ The above interface eagerly reads the full response body when you make the reque
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.

```python
with client.chat.completions.with_streaming_response.completions(
with client.chat.completions.with_streaming_response.create(
messages=[
{
"role": "user",
Expand Down
2 changes: 1 addition & 1 deletion api.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ from arcadepy.types import ChatMessage, ChatRequest, ChatResponse, Choice, Usage

Methods:

- <code title="post /v1/chat/completions">client.chat.completions.<a href="./src/arcadepy/resources/chat/completions.py">completions</a>(\*\*<a href="src/arcadepy/types/chat/completion_completions_params.py">params</a>) -> <a href="./src/arcadepy/types/chat_response.py">ChatResponse</a></code>
- <code title="post /v1/chat/completions">client.chat.completions.<a href="./src/arcadepy/resources/chat/completions.py">create</a>(\*\*<a href="src/arcadepy/types/chat/completion_create_params.py">params</a>) -> <a href="./src/arcadepy/types/chat_response.py">ChatResponse</a></code>

# Tools

Expand Down
30 changes: 15 additions & 15 deletions src/arcadepy/resources/chat/completions.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
from ...types.chat import completion_completions_params
from ...types.chat import completion_create_params
from ..._base_client import make_request_options
from ...types.chat_response import ChatResponse
from ...types.chat_message_param import ChatMessageParam
Expand Down Expand Up @@ -48,7 +48,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse:
"""
return CompletionsResourceWithStreamingResponse(self)

def completions(
def create(
self,
*,
frequency_penalty: int | NotGiven = NOT_GIVEN,
Expand All @@ -64,7 +64,7 @@ def completions(
seed: int | NotGiven = NOT_GIVEN,
stop: List[str] | NotGiven = NOT_GIVEN,
stream: bool | NotGiven = NOT_GIVEN,
stream_options: completion_completions_params.StreamOptions | NotGiven = NOT_GIVEN,
stream_options: completion_create_params.StreamOptions | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
tool_choice: object | NotGiven = NOT_GIVEN,
tools: object | NotGiven = NOT_GIVEN,
Expand Down Expand Up @@ -138,7 +138,7 @@ def completions(
"top_p": top_p,
"user": user,
},
completion_completions_params.CompletionCompletionsParams,
completion_create_params.CompletionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers,
Expand Down Expand Up @@ -171,7 +171,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon
"""
return AsyncCompletionsResourceWithStreamingResponse(self)

async def completions(
async def create(
self,
*,
frequency_penalty: int | NotGiven = NOT_GIVEN,
Expand All @@ -187,7 +187,7 @@ async def completions(
seed: int | NotGiven = NOT_GIVEN,
stop: List[str] | NotGiven = NOT_GIVEN,
stream: bool | NotGiven = NOT_GIVEN,
stream_options: completion_completions_params.StreamOptions | NotGiven = NOT_GIVEN,
stream_options: completion_create_params.StreamOptions | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
tool_choice: object | NotGiven = NOT_GIVEN,
tools: object | NotGiven = NOT_GIVEN,
Expand Down Expand Up @@ -261,7 +261,7 @@ async def completions(
"top_p": top_p,
"user": user,
},
completion_completions_params.CompletionCompletionsParams,
completion_create_params.CompletionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers,
Expand All @@ -278,33 +278,33 @@ class CompletionsResourceWithRawResponse:
def __init__(self, completions: CompletionsResource) -> None:
self._completions = completions

self.completions = to_raw_response_wrapper(
completions.completions,
self.create = to_raw_response_wrapper(
completions.create,
)


class AsyncCompletionsResourceWithRawResponse:
def __init__(self, completions: AsyncCompletionsResource) -> None:
self._completions = completions

self.completions = async_to_raw_response_wrapper(
completions.completions,
self.create = async_to_raw_response_wrapper(
completions.create,
)


class CompletionsResourceWithStreamingResponse:
def __init__(self, completions: CompletionsResource) -> None:
self._completions = completions

self.completions = to_streamed_response_wrapper(
completions.completions,
self.create = to_streamed_response_wrapper(
completions.create,
)


class AsyncCompletionsResourceWithStreamingResponse:
def __init__(self, completions: AsyncCompletionsResource) -> None:
self._completions = completions

self.completions = async_to_streamed_response_wrapper(
completions.completions,
self.create = async_to_streamed_response_wrapper(
completions.create,
)
2 changes: 1 addition & 1 deletion src/arcadepy/types/chat/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@

from __future__ import annotations

from .completion_completions_params import CompletionCompletionsParams as CompletionCompletionsParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
82 changes: 82 additions & 0 deletions src/arcadepy/types/chat/completion_create_params.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Dict, List, Iterable
from typing_extensions import Literal, TypedDict

from ..chat_message_param import ChatMessageParam

__all__ = ["CompletionCreateParams", "StreamOptions"]


class CompletionCreateParams(TypedDict, total=False):
    """Request body for ``client.chat.completions.create`` (POST /v1/chat/completions).

    ``total=False``: every key is optional; only keys the caller sets are sent.
    """

    frequency_penalty: int

    logit_bias: Dict[str, int]
    """
    LogitBias must be keyed by token id string (specified by their token ID in the
    tokenizer), not a word string. incorrect: `"logit_bias":{"You": 6}`, correct:
    `"logit_bias":{"1639": 6}` refs:
    https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
    """

    logprobs: bool
    """
    LogProbs indicates whether to return log probabilities of the output tokens or
    not. If true, returns the log probabilities of each output token returned in the
    content of message. This option is currently not available on the
    gpt-4-vision-preview model.
    """

    max_tokens: int

    messages: Iterable[ChatMessageParam]

    model: str

    n: int

    parallel_tool_calls: object
    """Disable the default behavior of parallel tool calls by setting it: false."""

    presence_penalty: int

    response_format: Literal["json_object", "text"]

    seed: int

    stop: List[str]

    stream: bool

    stream_options: StreamOptions
    """Options for streaming response. Only set this when you set stream: true."""

    temperature: float

    tool_choice: object
    """This can be either a string or a ToolChoice object."""

    tools: object

    top_logprobs: int
    """
    TopLogProbs is an integer between 0 and 5 specifying the number of most likely
    tokens to return at each token position, each with an associated log
    probability. logprobs must be set to true if this parameter is used.
    """

    top_p: float

    user: str


class StreamOptions(TypedDict, total=False):
    """Streaming configuration; only meaningful when ``stream`` is set to true."""

    include_usage: bool
    """
    If set, an additional chunk will be streamed before the data: [DONE] message.
    The usage field on this chunk shows the token usage statistics for the entire
    request, and the choices field will always be an empty array. All other chunks
    will also include a usage field, but with a null value.
    """
32 changes: 16 additions & 16 deletions tests/api_resources/chat/test_completions.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,13 @@ class TestCompletions:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

@parametrize
def test_method_completions(self, client: Arcade) -> None:
completion = client.chat.completions.completions()
def test_method_create(self, client: Arcade) -> None:
completion = client.chat.completions.create()
assert_matches_type(ChatResponse, completion, path=["response"])

@parametrize
def test_method_completions_with_all_params(self, client: Arcade) -> None:
completion = client.chat.completions.completions(
def test_method_create_with_all_params(self, client: Arcade) -> None:
completion = client.chat.completions.create(
frequency_penalty=0,
logit_bias={"foo": 0},
logprobs=True,
Expand Down Expand Up @@ -146,17 +146,17 @@ def test_method_completions_with_all_params(self, client: Arcade) -> None:
assert_matches_type(ChatResponse, completion, path=["response"])

@parametrize
def test_raw_response_completions(self, client: Arcade) -> None:
response = client.chat.completions.with_raw_response.completions()
def test_raw_response_create(self, client: Arcade) -> None:
response = client.chat.completions.with_raw_response.create()

assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
completion = response.parse()
assert_matches_type(ChatResponse, completion, path=["response"])

@parametrize
def test_streaming_response_completions(self, client: Arcade) -> None:
with client.chat.completions.with_streaming_response.completions() as response:
def test_streaming_response_create(self, client: Arcade) -> None:
with client.chat.completions.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"

Expand All @@ -170,13 +170,13 @@ class TestAsyncCompletions:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])

@parametrize
async def test_method_completions(self, async_client: AsyncArcade) -> None:
completion = await async_client.chat.completions.completions()
async def test_method_create(self, async_client: AsyncArcade) -> None:
completion = await async_client.chat.completions.create()
assert_matches_type(ChatResponse, completion, path=["response"])

@parametrize
async def test_method_completions_with_all_params(self, async_client: AsyncArcade) -> None:
completion = await async_client.chat.completions.completions(
async def test_method_create_with_all_params(self, async_client: AsyncArcade) -> None:
completion = await async_client.chat.completions.create(
frequency_penalty=0,
logit_bias={"foo": 0},
logprobs=True,
Expand Down Expand Up @@ -298,17 +298,17 @@ async def test_method_completions_with_all_params(self, async_client: AsyncArcad
assert_matches_type(ChatResponse, completion, path=["response"])

@parametrize
async def test_raw_response_completions(self, async_client: AsyncArcade) -> None:
response = await async_client.chat.completions.with_raw_response.completions()
async def test_raw_response_create(self, async_client: AsyncArcade) -> None:
response = await async_client.chat.completions.with_raw_response.create()

assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
completion = await response.parse()
assert_matches_type(ChatResponse, completion, path=["response"])

@parametrize
async def test_streaming_response_completions(self, async_client: AsyncArcade) -> None:
async with async_client.chat.completions.with_streaming_response.completions() as response:
async def test_streaming_response_create(self, async_client: AsyncArcade) -> None:
async with async_client.chat.completions.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"

Expand Down
16 changes: 6 additions & 10 deletions tests/test_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -800,7 +800,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:

respx_mock.post("/v1/chat/completions").mock(side_effect=retry_handler)

response = client.chat.completions.with_raw_response.completions()
response = client.chat.completions.with_raw_response.create()

assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
Expand All @@ -824,9 +824,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:

respx_mock.post("/v1/chat/completions").mock(side_effect=retry_handler)

response = client.chat.completions.with_raw_response.completions(
extra_headers={"x-stainless-retry-count": Omit()}
)
response = client.chat.completions.with_raw_response.create(extra_headers={"x-stainless-retry-count": Omit()})

assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0

Expand All @@ -849,9 +847,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:

respx_mock.post("/v1/chat/completions").mock(side_effect=retry_handler)

response = client.chat.completions.with_raw_response.completions(
extra_headers={"x-stainless-retry-count": "42"}
)
response = client.chat.completions.with_raw_response.create(extra_headers={"x-stainless-retry-count": "42"})

assert response.http_request.headers.get("x-stainless-retry-count") == "42"

Expand Down Expand Up @@ -1625,7 +1621,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:

respx_mock.post("/v1/chat/completions").mock(side_effect=retry_handler)

response = await client.chat.completions.with_raw_response.completions()
response = await client.chat.completions.with_raw_response.create()

assert response.retries_taken == failures_before_success
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
Expand All @@ -1650,7 +1646,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:

respx_mock.post("/v1/chat/completions").mock(side_effect=retry_handler)

response = await client.chat.completions.with_raw_response.completions(
response = await client.chat.completions.with_raw_response.create(
extra_headers={"x-stainless-retry-count": Omit()}
)

Expand All @@ -1676,7 +1672,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:

respx_mock.post("/v1/chat/completions").mock(side_effect=retry_handler)

response = await client.chat.completions.with_raw_response.completions(
response = await client.chat.completions.with_raw_response.create(
extra_headers={"x-stainless-retry-count": "42"}
)

Expand Down