From c2b96ce3d95cc9b74bffd8d6a499927eefd23b14 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 3 Jul 2025 07:37:47 +0000
Subject: [PATCH 01/13] feat(api): manual updates
Add GRADIENTAI_AGENT_KEY
---
.stats.yml | 2 +-
src/gradientai/_client.py | 18 +++++
tests/conftest.py | 8 +-
tests/test_client.py | 149 +++++++++++++++++++++++++++++++++-----
4 files changed, 157 insertions(+), 20 deletions(-)
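Usage note (not part of the commit): the new option mirrors the existing key handling, so it can be passed explicitly or inferred from `GRADIENTAI_AGENT_KEY`. A minimal sketch against a local install of this SDK, based on the constructor and tests below:

```python
import os

from gradientai import GradientAI

# Pass the new credential explicitly...
client = GradientAI(
    api_key="My API Key",
    inference_key="My Inference Key",
    agent_key="My Agent Key",
)

# ...or let the constructor infer it from the environment, mirroring the
# existing GRADIENTAI_API_KEY / GRADIENTAI_INFERENCE_KEY behaviour.
os.environ["GRADIENTAI_AGENT_KEY"] = "My Agent Key"
client = GradientAI(api_key="My API Key")
assert client.agent_key == "My Agent Key"

# copy() preserves the option unless overridden, as the new tests assert.
assert client.copy(agent_key="another My Agent Key").agent_key == "another My Agent Key"
```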
diff --git a/.stats.yml b/.stats.yml
index 0a6b7a71..9ebb83f9 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 76
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml
openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208
-config_hash: 1c936b3bd798c3fcb25479b19efa999a
+config_hash: 9c2e548d86a376bc5f6c458de6944504
diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py
index 2dc19e49..c696258b 100644
--- a/src/gradientai/_client.py
+++ b/src/gradientai/_client.py
@@ -56,6 +56,7 @@ class GradientAI(SyncAPIClient):
# client options
api_key: str | None
inference_key: str | None
+ agent_key: str | None
agent_domain: str | None
def __init__(
@@ -63,6 +64,7 @@ def __init__(
*,
api_key: str | None = None,
inference_key: str | None = None,
+ agent_key: str | None = None,
agent_domain: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
@@ -88,6 +90,7 @@ def __init__(
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `GRADIENTAI_API_KEY`
- `inference_key` from `GRADIENTAI_INFERENCE_KEY`
+ - `agent_key` from `GRADIENTAI_AGENT_KEY`
"""
if api_key is None:
api_key = os.environ.get("GRADIENTAI_API_KEY")
@@ -97,6 +100,10 @@ def __init__(
inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY")
self.inference_key = inference_key
+ if agent_key is None:
+ agent_key = os.environ.get("GRADIENTAI_AGENT_KEY")
+ self.agent_key = agent_key
+
self.agent_domain = agent_domain
if base_url is None:
@@ -200,6 +207,7 @@ def copy(
*,
api_key: str | None = None,
inference_key: str | None = None,
+ agent_key: str | None = None,
agent_domain: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
@@ -236,6 +244,7 @@ def copy(
client = self.__class__(
api_key=api_key or self.api_key,
inference_key=inference_key or self.inference_key,
+ agent_key=agent_key or self.agent_key,
agent_domain=agent_domain or self.agent_domain,
base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
@@ -290,6 +299,7 @@ class AsyncGradientAI(AsyncAPIClient):
# client options
api_key: str | None
inference_key: str | None
+ agent_key: str | None
agent_domain: str | None
def __init__(
@@ -297,6 +307,7 @@ def __init__(
*,
api_key: str | None = None,
inference_key: str | None = None,
+ agent_key: str | None = None,
agent_domain: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
@@ -322,6 +333,7 @@ def __init__(
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `GRADIENTAI_API_KEY`
- `inference_key` from `GRADIENTAI_INFERENCE_KEY`
+ - `agent_key` from `GRADIENTAI_AGENT_KEY`
"""
if api_key is None:
api_key = os.environ.get("GRADIENTAI_API_KEY")
@@ -331,6 +343,10 @@ def __init__(
inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY")
self.inference_key = inference_key
+ if agent_key is None:
+ agent_key = os.environ.get("GRADIENTAI_AGENT_KEY")
+ self.agent_key = agent_key
+
self.agent_domain = agent_domain
if base_url is None:
@@ -434,6 +450,7 @@ def copy(
*,
api_key: str | None = None,
inference_key: str | None = None,
+ agent_key: str | None = None,
agent_domain: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
@@ -470,6 +487,7 @@ def copy(
client = self.__class__(
api_key=api_key or self.api_key,
inference_key=inference_key or self.inference_key,
+ agent_key=agent_key or self.agent_key,
agent_domain=agent_domain or self.agent_domain,
base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
diff --git a/tests/conftest.py b/tests/conftest.py
index 39547c5d..5b24e1c2 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -47,6 +47,7 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None:
api_key = "My API Key"
inference_key = "My Inference Key"
+agent_key = "My Agent Key"
@pytest.fixture(scope="session")
@@ -56,7 +57,11 @@ def client(request: FixtureRequest) -> Iterator[GradientAI]:
raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}")
with GradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=strict
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=strict,
) as client:
yield client
@@ -85,6 +90,7 @@ async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradientAI
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=strict,
http_client=http_client,
) as client:
diff --git a/tests/test_client.py b/tests/test_client.py
index 16220895..c901e2c8 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -40,6 +40,7 @@
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
api_key = "My API Key"
inference_key = "My Inference Key"
+agent_key = "My Agent Key"
def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]:
@@ -62,7 +63,11 @@ def _get_open_connections(client: GradientAI | AsyncGradientAI) -> int:
class TestGradientAI:
client = GradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
@pytest.mark.respx(base_url=base_url)
@@ -97,6 +102,10 @@ def test_copy(self) -> None:
assert copied.inference_key == "another My Inference Key"
assert self.client.inference_key == "My Inference Key"
+ copied = self.client.copy(agent_key="another My Agent Key")
+ assert copied.agent_key == "another My Agent Key"
+ assert self.client.agent_key == "My Agent Key"
+
def test_copy_default_options(self) -> None:
# options that have a default are overridden correctly
copied = self.client.copy(max_retries=7)
@@ -118,6 +127,7 @@ def test_copy_default_headers(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
default_headers={"X-Foo": "bar"},
)
@@ -156,6 +166,7 @@ def test_copy_default_query(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
default_query={"foo": "bar"},
)
@@ -286,6 +297,7 @@ def test_client_timeout_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
timeout=httpx.Timeout(0),
)
@@ -301,6 +313,7 @@ def test_http_client_timeout_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=http_client,
)
@@ -315,6 +328,7 @@ def test_http_client_timeout_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=http_client,
)
@@ -329,6 +343,7 @@ def test_http_client_timeout_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=http_client,
)
@@ -344,6 +359,7 @@ async def test_invalid_http_client(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=cast(Any, http_client),
)
@@ -353,6 +369,7 @@ def test_default_headers_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
default_headers={"X-Foo": "bar"},
)
@@ -364,6 +381,7 @@ def test_default_headers_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
@@ -376,14 +394,22 @@ def test_default_headers_option(self) -> None:
def test_validate_headers(self) -> None:
client = GradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("Authorization") == f"Bearer {api_key}"
with update_env(**{"GRADIENTAI_API_KEY": Omit()}):
client2 = GradientAI(
- base_url=base_url, api_key=None, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=None,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
with pytest.raises(
@@ -402,6 +428,7 @@ def test_default_query_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
default_query={"query_param": "bar"},
)
@@ -607,6 +634,7 @@ def test_base_url_setter(self) -> None:
base_url="https://example.com/from_init",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
)
assert client.base_url == "https://example.com/from_init/"
@@ -617,7 +645,9 @@ def test_base_url_setter(self) -> None:
def test_base_url_env(self) -> None:
with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"):
- client = GradientAI(api_key=api_key, inference_key=inference_key, _strict_response_validation=True)
+ client = GradientAI(
+ api_key=api_key, inference_key=inference_key, agent_key=agent_key, _strict_response_validation=True
+ )
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
@@ -627,12 +657,14 @@ def test_base_url_env(self) -> None:
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
),
GradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
@@ -656,12 +688,14 @@ def test_base_url_trailing_slash(self, client: GradientAI) -> None:
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
),
GradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
@@ -685,12 +719,14 @@ def test_base_url_no_trailing_slash(self, client: GradientAI) -> None:
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
),
GradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
@@ -709,7 +745,11 @@ def test_absolute_request_url(self, client: GradientAI) -> None:
def test_copied_client_does_not_close_http(self) -> None:
client = GradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
assert not client.is_closed()
@@ -722,7 +762,11 @@ def test_copied_client_does_not_close_http(self) -> None:
def test_client_context_manager(self) -> None:
client = GradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
with client as c2:
assert c2 is client
@@ -748,6 +792,7 @@ def test_client_max_retries_validation(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
max_retries=cast(Any, None),
)
@@ -771,14 +816,22 @@ class Model(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
strict_client = GradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
with pytest.raises(APIResponseValidationError):
strict_client.get("/foo", cast_to=Model)
client = GradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=False
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=False,
)
response = client.get("/foo", cast_to=Model)
@@ -808,7 +861,11 @@ class Model(BaseModel):
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
client = GradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
headers = httpx.Headers({"retry-after": retry_after})
@@ -1006,7 +1063,11 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
class TestAsyncGradientAI:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
@pytest.mark.respx(base_url=base_url)
@@ -1043,6 +1104,10 @@ def test_copy(self) -> None:
assert copied.inference_key == "another My Inference Key"
assert self.client.inference_key == "My Inference Key"
+ copied = self.client.copy(agent_key="another My Agent Key")
+ assert copied.agent_key == "another My Agent Key"
+ assert self.client.agent_key == "My Agent Key"
+
def test_copy_default_options(self) -> None:
# options that have a default are overridden correctly
copied = self.client.copy(max_retries=7)
@@ -1064,6 +1129,7 @@ def test_copy_default_headers(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
default_headers={"X-Foo": "bar"},
)
@@ -1102,6 +1168,7 @@ def test_copy_default_query(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
default_query={"foo": "bar"},
)
@@ -1232,6 +1299,7 @@ async def test_client_timeout_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
timeout=httpx.Timeout(0),
)
@@ -1247,6 +1315,7 @@ async def test_http_client_timeout_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=http_client,
)
@@ -1261,6 +1330,7 @@ async def test_http_client_timeout_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=http_client,
)
@@ -1275,6 +1345,7 @@ async def test_http_client_timeout_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=http_client,
)
@@ -1290,6 +1361,7 @@ def test_invalid_http_client(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=cast(Any, http_client),
)
@@ -1299,6 +1371,7 @@ def test_default_headers_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
default_headers={"X-Foo": "bar"},
)
@@ -1310,6 +1383,7 @@ def test_default_headers_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
@@ -1322,14 +1396,22 @@ def test_default_headers_option(self) -> None:
def test_validate_headers(self) -> None:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("Authorization") == f"Bearer {api_key}"
with update_env(**{"GRADIENTAI_API_KEY": Omit()}):
client2 = AsyncGradientAI(
- base_url=base_url, api_key=None, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=None,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
with pytest.raises(
@@ -1348,6 +1430,7 @@ def test_default_query_option(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
default_query={"query_param": "bar"},
)
@@ -1553,6 +1636,7 @@ def test_base_url_setter(self) -> None:
base_url="https://example.com/from_init",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
)
assert client.base_url == "https://example.com/from_init/"
@@ -1563,7 +1647,9 @@ def test_base_url_setter(self) -> None:
def test_base_url_env(self) -> None:
with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"):
- client = AsyncGradientAI(api_key=api_key, inference_key=inference_key, _strict_response_validation=True)
+ client = AsyncGradientAI(
+ api_key=api_key, inference_key=inference_key, agent_key=agent_key, _strict_response_validation=True
+ )
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
@@ -1573,12 +1659,14 @@ def test_base_url_env(self) -> None:
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
),
AsyncGradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
@@ -1602,12 +1690,14 @@ def test_base_url_trailing_slash(self, client: AsyncGradientAI) -> None:
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
),
AsyncGradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
@@ -1631,12 +1721,14 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradientAI) -> None:
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
),
AsyncGradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
@@ -1655,7 +1747,11 @@ def test_absolute_request_url(self, client: AsyncGradientAI) -> None:
async def test_copied_client_does_not_close_http(self) -> None:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
assert not client.is_closed()
@@ -1669,7 +1765,11 @@ async def test_copied_client_does_not_close_http(self) -> None:
async def test_client_context_manager(self) -> None:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
async with client as c2:
assert c2 is client
@@ -1696,6 +1796,7 @@ async def test_client_max_retries_validation(self) -> None:
base_url=base_url,
api_key=api_key,
inference_key=inference_key,
+ agent_key=agent_key,
_strict_response_validation=True,
max_retries=cast(Any, None),
)
@@ -1721,14 +1822,22 @@ class Model(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
strict_client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
with pytest.raises(APIResponseValidationError):
await strict_client.get("/foo", cast_to=Model)
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=False
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=False,
)
response = await client.get("/foo", cast_to=Model)
@@ -1759,7 +1868,11 @@ class Model(BaseModel):
@pytest.mark.asyncio
async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ agent_key=agent_key,
+ _strict_response_validation=True,
)
headers = httpx.Headers({"retry-after": retry_after})
From d67371f9f4d0761ea03097820bc3e77654b4d2bf Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 3 Jul 2025 13:53:57 +0000
Subject: [PATCH 02/13] feat(api): share chat completion chunk model between
chat and agent.chat
---
.stats.yml | 2 +-
api.md | 6 +-
.../resources/agents/chat/completions.py | 18 ++--
src/gradientai/resources/chat/completions.py | 2 +-
src/gradientai/types/__init__.py | 7 +-
src/gradientai/types/agents/chat/__init__.py | 1 -
.../chat/agent_chat_completion_chunk.py | 93 -------------------
src/gradientai/types/chat/__init__.py | 1 -
src/gradientai/types/shared/__init__.py | 1 +
.../{chat => shared}/chat_completion_chunk.py | 2 +-
10 files changed, 22 insertions(+), 111 deletions(-)
delete mode 100644 src/gradientai/types/agents/chat/agent_chat_completion_chunk.py
rename src/gradientai/types/{chat => shared}/chat_completion_chunk.py (97%)
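Migration note (not part of the commit): `AgentChatCompletionChunk` is removed; both `chat` and `agents.chat` now stream the shared model, which is re-exported at the top level. A quick check, assuming a build with this change applied:

```python
# Before (removed in this commit):
#   from gradientai.types.agents.chat import AgentChatCompletionChunk
# After -- one shared model, re-exported from the package's types root:
from gradientai.types import ChatCompletionChunk
from gradientai.types.shared.chat_completion_chunk import (
    ChatCompletionChunk as SharedChatCompletionChunk,
)

# The top-level name is the same class object as the canonical shared module's.
assert ChatCompletionChunk is SharedChatCompletionChunk
```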
diff --git a/.stats.yml b/.stats.yml
index 9ebb83f9..bfeef284 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 76
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml
openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208
-config_hash: 9c2e548d86a376bc5f6c458de6944504
+config_hash: 6ed9ee8d3f0d6392816bfaf9dc4894a6
diff --git a/api.md b/api.md
index 65699eaa..af2e8a33 100644
--- a/api.md
+++ b/api.md
@@ -1,7 +1,7 @@
# Shared Types
```python
-from gradientai.types import APILinks, APIMeta, ChatCompletionTokenLogprob
+from gradientai.types import APILinks, APIMeta, ChatCompletionChunk, ChatCompletionTokenLogprob
```
# Agents
@@ -65,7 +65,7 @@ Methods:
Types:
```python
-from gradientai.types.agents.chat import AgentChatCompletionChunk, CompletionCreateResponse
+from gradientai.types.agents.chat import CompletionCreateResponse
```
Methods:
@@ -260,7 +260,7 @@ Methods:
Types:
```python
-from gradientai.types.chat import ChatCompletionChunk, CompletionCreateResponse
+from gradientai.types.chat import CompletionCreateResponse
```
Methods:
diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py
index 7acba243..5923ef2a 100644
--- a/src/gradientai/resources/agents/chat/completions.py
+++ b/src/gradientai/resources/agents/chat/completions.py
@@ -20,8 +20,8 @@
from ...._streaming import Stream, AsyncStream
from ...._base_client import make_request_options
from ....types.agents.chat import completion_create_params
+from ....types.shared.chat_completion_chunk import ChatCompletionChunk
from ....types.agents.chat.completion_create_response import CompletionCreateResponse
-from ....types.agents.chat.agent_chat_completion_chunk import AgentChatCompletionChunk
__all__ = ["CompletionsResource", "AsyncCompletionsResource"]
@@ -186,7 +186,7 @@ def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Stream[AgentChatCompletionChunk]:
+ ) -> Stream[ChatCompletionChunk]:
"""
Creates a model response for the given chat conversation.
@@ -299,7 +299,7 @@ def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionCreateResponse | Stream[AgentChatCompletionChunk]:
+ ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
"""
Creates a model response for the given chat conversation.
@@ -412,7 +412,7 @@ def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionCreateResponse | Stream[AgentChatCompletionChunk]:
+ ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
return self._post(
"/chat/completions"
if self._client._base_url_overridden
@@ -446,7 +446,7 @@ def create(
),
cast_to=CompletionCreateResponse,
stream=stream or False,
- stream_cls=Stream[AgentChatCompletionChunk],
+ stream_cls=Stream[ChatCompletionChunk],
)
@@ -610,7 +610,7 @@ async def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AsyncStream[AgentChatCompletionChunk]:
+ ) -> AsyncStream[ChatCompletionChunk]:
"""
Creates a model response for the given chat conversation.
@@ -723,7 +723,7 @@ async def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionCreateResponse | AsyncStream[AgentChatCompletionChunk]:
+ ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
"""
Creates a model response for the given chat conversation.
@@ -836,7 +836,7 @@ async def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> CompletionCreateResponse | AsyncStream[AgentChatCompletionChunk]:
+ ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
return await self._post(
"/chat/completions"
if self._client._base_url_overridden
@@ -870,7 +870,7 @@ async def create(
),
cast_to=CompletionCreateResponse,
stream=stream or False,
- stream_cls=AsyncStream[AgentChatCompletionChunk],
+ stream_cls=AsyncStream[ChatCompletionChunk],
)
diff --git a/src/gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py
index 7993f757..98377562 100644
--- a/src/gradientai/resources/chat/completions.py
+++ b/src/gradientai/resources/chat/completions.py
@@ -20,7 +20,7 @@
from ..._streaming import Stream, AsyncStream
from ...types.chat import completion_create_params
from ..._base_client import make_request_options
-from ...types.chat.chat_completion_chunk import ChatCompletionChunk
+from ...types.shared.chat_completion_chunk import ChatCompletionChunk
from ...types.chat.completion_create_response import CompletionCreateResponse
__all__ = ["CompletionsResource", "AsyncCompletionsResource"]
diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py
index 4ec63b92..c8144381 100644
--- a/src/gradientai/types/__init__.py
+++ b/src/gradientai/types/__init__.py
@@ -2,7 +2,12 @@
from __future__ import annotations
-from .shared import APIMeta as APIMeta, APILinks as APILinks, ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
+from .shared import (
+ APIMeta as APIMeta,
+ APILinks as APILinks,
+ ChatCompletionChunk as ChatCompletionChunk,
+ ChatCompletionTokenLogprob as ChatCompletionTokenLogprob,
+)
from .api_agent import APIAgent as APIAgent
from .api_model import APIModel as APIModel
from .api_agreement import APIAgreement as APIAgreement
diff --git a/src/gradientai/types/agents/chat/__init__.py b/src/gradientai/types/agents/chat/__init__.py
index 305ba0af..9384ac14 100644
--- a/src/gradientai/types/agents/chat/__init__.py
+++ b/src/gradientai/types/agents/chat/__init__.py
@@ -4,4 +4,3 @@
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
-from .agent_chat_completion_chunk import AgentChatCompletionChunk as AgentChatCompletionChunk
diff --git a/src/gradientai/types/agents/chat/agent_chat_completion_chunk.py b/src/gradientai/types/agents/chat/agent_chat_completion_chunk.py
deleted file mode 100644
index 36ee3d9e..00000000
--- a/src/gradientai/types/agents/chat/agent_chat_completion_chunk.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-from typing_extensions import Literal
-
-from ...._models import BaseModel
-from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob
-
-__all__ = ["AgentChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"]
-
-
-class ChoiceDelta(BaseModel):
- content: Optional[str] = None
- """The contents of the chunk message."""
-
- refusal: Optional[str] = None
- """The refusal message generated by the model."""
-
- role: Optional[Literal["developer", "user", "assistant"]] = None
- """The role of the author of this message."""
-
-
-class ChoiceLogprobs(BaseModel):
- content: Optional[List[ChatCompletionTokenLogprob]] = None
- """A list of message content tokens with log probability information."""
-
- refusal: Optional[List[ChatCompletionTokenLogprob]] = None
- """A list of message refusal tokens with log probability information."""
-
-
-class Choice(BaseModel):
- delta: ChoiceDelta
- """A chat completion delta generated by streamed model responses."""
-
- finish_reason: Optional[Literal["stop", "length"]] = None
- """The reason the model stopped generating tokens.
-
- This will be `stop` if the model hit a natural stop point or a provided stop
- sequence, or `length` if the maximum number of tokens specified in the request
- was reached
- """
-
- index: int
- """The index of the choice in the list of choices."""
-
- logprobs: Optional[ChoiceLogprobs] = None
- """Log probability information for the choice."""
-
-
-class Usage(BaseModel):
- completion_tokens: int
- """Number of tokens in the generated completion."""
-
- prompt_tokens: int
- """Number of tokens in the prompt."""
-
- total_tokens: int
- """Total number of tokens used in the request (prompt + completion)."""
-
-
-class AgentChatCompletionChunk(BaseModel):
- id: str
- """A unique identifier for the chat completion. Each chunk has the same ID."""
-
- choices: List[Choice]
- """A list of chat completion choices.
-
- Can contain more than one element if `n` is greater than 1. Can also be empty
- for the last chunk if you set `stream_options: {"include_usage": true}`.
- """
-
- created: int
- """The Unix timestamp (in seconds) of when the chat completion was created.
-
- Each chunk has the same timestamp.
- """
-
- model: str
- """The model to generate the completion."""
-
- object: Literal["chat.completion.chunk"]
- """The object type, which is always `chat.completion.chunk`."""
-
- usage: Optional[Usage] = None
- """
- An optional field that will only be present when you set
- `stream_options: {"include_usage": true}` in your request. When present, it
- contains a null value **except for the last chunk** which contains the token
- usage statistics for the entire request.
-
- **NOTE:** If the stream is interrupted or cancelled, you may not receive the
- final usage chunk which contains the total token usage for the request.
- """
diff --git a/src/gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py
index f0243162..9384ac14 100644
--- a/src/gradientai/types/chat/__init__.py
+++ b/src/gradientai/types/chat/__init__.py
@@ -2,6 +2,5 @@
from __future__ import annotations
-from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse
diff --git a/src/gradientai/types/shared/__init__.py b/src/gradientai/types/shared/__init__.py
index dc71bdd3..9fdd7605 100644
--- a/src/gradientai/types/shared/__init__.py
+++ b/src/gradientai/types/shared/__init__.py
@@ -2,4 +2,5 @@
from .api_meta import APIMeta as APIMeta
from .api_links import APILinks as APILinks
+from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
diff --git a/src/gradientai/types/chat/chat_completion_chunk.py b/src/gradientai/types/shared/chat_completion_chunk.py
similarity index 97%
rename from src/gradientai/types/chat/chat_completion_chunk.py
rename to src/gradientai/types/shared/chat_completion_chunk.py
index 4adcc63d..4d45ef8d 100644
--- a/src/gradientai/types/chat/chat_completion_chunk.py
+++ b/src/gradientai/types/shared/chat_completion_chunk.py
@@ -4,7 +4,7 @@
from typing_extensions import Literal
from ..._models import BaseModel
-from ..shared.chat_completion_token_logprob import ChatCompletionTokenLogprob
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob
__all__ = ["ChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"]
From 1f9bff40fa1a791c18563a3f65bbdf9e1fa898e6 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 3 Jul 2025 13:57:58 +0000
Subject: [PATCH 03/13] codegen metadata
---
.stats.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.stats.yml b/.stats.yml
index bfeef284..55043d79 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 76
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml
openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208
-config_hash: 6ed9ee8d3f0d6392816bfaf9dc4894a6
+config_hash: 558ec54e9e056494abf623ff424c104e
From bd6feccf97fa5877085783419f11dad04c57d700 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 3 Jul 2025 14:05:10 +0000
Subject: [PATCH 04/13] feat(api): manual updates
---
.stats.yml | 6 ++---
README.md | 22 +++++++++----------
api.md | 2 +-
.../resources/agents/chat/completions.py | 8 +++----
tests/test_client.py | 20 ++++++++---------
5 files changed, 29 insertions(+), 29 deletions(-)
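Review note (not part of the commit): two things change together here. Agent chat completions are now tagged with an `?agent=true` marker, and the README and client tests exercise `client.chat.completions` instead of `client.agents.chat.completions`. A standalone sketch of the routing rule as written in the hunks below (the helper name is illustrative, not the SDK's):

```python
def agent_chat_completions_url(base_url_overridden: bool) -> str:
    """Mirror the routing in this patch: a relative path when the caller
    overrides base_url, otherwise the default inference host -- with the
    ?agent=true marker either way."""
    if base_url_overridden:
        return "/chat/completions?agent=true"
    return "https://inference.do-ai.run/v1/chat/completions?agent=true"


assert agent_chat_completions_url(True) == "/chat/completions?agent=true"
assert agent_chat_completions_url(False).startswith("https://inference.do-ai.run")
```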
diff --git a/.stats.yml b/.stats.yml
index 55043d79..1787fb3e 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 76
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml
+configured_endpoints: 77
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-391afaae764eb758523b67805cb47ae3bc319dc119d83414afdd66f123ceaf5c.yml
openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208
-config_hash: 558ec54e9e056494abf623ff424c104e
+config_hash: 6edaff3557194ba8897d14f7ca74589c
diff --git a/README.md b/README.md
index 6110f913..a4f699db 100644
--- a/README.md
+++ b/README.md
@@ -73,7 +73,7 @@ client = AsyncGradientAI(
async def main() -> None:
- completion = await client.agents.chat.completions.create(
+ completion = await client.chat.completions.create(
messages=[
{
"role": "user",
@@ -115,7 +115,7 @@ async def main() -> None:
api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted
http_client=DefaultAioHttpClient(),
) as client:
- completion = await client.agents.chat.completions.create(
+ completion = await client.chat.completions.create(
messages=[
{
"role": "user",
@@ -139,7 +139,7 @@ from gradientai import GradientAI
client = GradientAI()
-stream = client.agents.chat.completions.create(
+stream = client.chat.completions.create(
messages=[
{
"role": "user",
@@ -160,7 +160,7 @@ from gradientai import AsyncGradientAI
client = AsyncGradientAI()
-stream = await client.agents.chat.completions.create(
+stream = await client.chat.completions.create(
messages=[
{
"role": "user",
@@ -192,7 +192,7 @@ from gradientai import GradientAI
client = GradientAI()
-completion = client.agents.chat.completions.create(
+completion = client.chat.completions.create(
messages=[
{
"content": "string",
@@ -221,7 +221,7 @@ from gradientai import GradientAI
client = GradientAI()
try:
- client.agents.chat.completions.create(
+ client.chat.completions.create(
messages=[
{
"role": "user",
@@ -272,7 +272,7 @@ client = GradientAI(
)
# Or, configure per-request:
-client.with_options(max_retries=5).agents.chat.completions.create(
+client.with_options(max_retries=5).chat.completions.create(
messages=[
{
"role": "user",
@@ -303,7 +303,7 @@ client = GradientAI(
)
# Override per-request:
-client.with_options(timeout=5.0).agents.chat.completions.create(
+client.with_options(timeout=5.0).chat.completions.create(
messages=[
{
"role": "user",
@@ -352,7 +352,7 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to
from gradientai import GradientAI
client = GradientAI()
-response = client.agents.chat.completions.with_raw_response.create(
+response = client.chat.completions.with_raw_response.create(
messages=[{
"role": "user",
"content": "What is the capital of France?",
@@ -361,7 +361,7 @@ response = client.agents.chat.completions.with_raw_response.create(
)
print(response.headers.get('X-My-Header'))
-completion = response.parse() # get the object that `agents.chat.completions.create()` would have returned
+completion = response.parse() # get the object that `chat.completions.create()` would have returned
print(completion.choices)
```
@@ -376,7 +376,7 @@ The above interface eagerly reads the full response body when you make the reque
To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods.
```python
-with client.agents.chat.completions.with_streaming_response.create(
+with client.chat.completions.with_streaming_response.create(
messages=[
{
"role": "user",
diff --git a/api.md b/api.md
index af2e8a33..c6acd4ec 100644
--- a/api.md
+++ b/api.md
@@ -70,7 +70,7 @@ from gradientai.types.agents.chat import CompletionCreateResponse
Methods:
-- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse
## EvaluationMetrics
diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py
index 5923ef2a..4ec70d30 100644
--- a/src/gradientai/resources/agents/chat/completions.py
+++ b/src/gradientai/resources/agents/chat/completions.py
@@ -414,9 +414,9 @@ def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> CompletionCreateResponse | Stream[ChatCompletionChunk]:
return self._post(
- "/chat/completions"
+ "/chat/completions?agent=true"
if self._client._base_url_overridden
- else "https://inference.do-ai.run/v1/chat/completions",
+ else "https://inference.do-ai.run/v1/chat/completions?agent=true",
body=maybe_transform(
{
"messages": messages,
@@ -838,9 +838,9 @@ async def create(
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]:
return await self._post(
- "/chat/completions"
+ "/chat/completions?agent=true"
if self._client._base_url_overridden
- else "https://inference.do-ai.run/v1/chat/completions",
+ else "https://inference.do-ai.run/v1/chat/completions?agent=true",
body=await async_maybe_transform(
{
"messages": messages,
diff --git a/tests/test_client.py b/tests/test_client.py
index c901e2c8..5c16eb22 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -879,7 +879,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien
respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
- client.agents.chat.completions.with_streaming_response.create(
+ client.chat.completions.with_streaming_response.create(
messages=[
{
"content": "string",
@@ -897,7 +897,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client
respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
- client.agents.chat.completions.with_streaming_response.create(
+ client.chat.completions.with_streaming_response.create(
messages=[
{
"content": "string",
@@ -934,7 +934,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
- response = client.agents.chat.completions.with_raw_response.create(
+ response = client.chat.completions.with_raw_response.create(
messages=[
{
"content": "string",
@@ -966,7 +966,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
- response = client.agents.chat.completions.with_raw_response.create(
+ response = client.chat.completions.with_raw_response.create(
messages=[
{
"content": "string",
@@ -998,7 +998,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
- response = client.agents.chat.completions.with_raw_response.create(
+ response = client.chat.completions.with_raw_response.create(
messages=[
{
"content": "string",
@@ -1888,7 +1888,7 @@ async def test_retrying_timeout_errors_doesnt_leak(
respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
- await async_client.agents.chat.completions.with_streaming_response.create(
+ await async_client.chat.completions.with_streaming_response.create(
messages=[
{
"content": "string",
@@ -1908,7 +1908,7 @@ async def test_retrying_status_errors_doesnt_leak(
respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
- await async_client.agents.chat.completions.with_streaming_response.create(
+ await async_client.chat.completions.with_streaming_response.create(
messages=[
{
"content": "string",
@@ -1946,7 +1946,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
- response = await client.agents.chat.completions.with_raw_response.create(
+ response = await client.chat.completions.with_raw_response.create(
messages=[
{
"content": "string",
@@ -1979,7 +1979,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
- response = await client.agents.chat.completions.with_raw_response.create(
+ response = await client.chat.completions.with_raw_response.create(
messages=[
{
"content": "string",
@@ -2012,7 +2012,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
respx_mock.post("/chat/completions").mock(side_effect=retry_handler)
- response = await client.agents.chat.completions.with_raw_response.create(
+ response = await client.chat.completions.with_raw_response.create(
messages=[
{
"content": "string",
From 1df657d9b384cb85d27fe839c0dab212a7773f8f Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 8 Jul 2025 02:19:36 +0000
Subject: [PATCH 05/13] chore(internal): codegen related update
---
requirements-dev.lock | 2 +-
requirements.lock | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 85b6a829..7866f549 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -56,7 +56,7 @@ httpx==0.28.1
# via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python
# via httpx-aiohttp
# via respx
-httpx-aiohttp==0.1.6
+httpx-aiohttp==0.1.8
# via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python
idna==3.4
# via anyio
diff --git a/requirements.lock b/requirements.lock
index 47944bd5..2a8aeea9 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -43,7 +43,7 @@ httpcore==1.0.2
httpx==0.28.1
# via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python
# via httpx-aiohttp
-httpx-aiohttp==0.1.6
+httpx-aiohttp==0.1.8
# via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python
idna==3.4
# via anyio
From 6f4e960b6cb838cbf5e50301375fcb4b60a2cfb3 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 9 Jul 2025 02:36:34 +0000
Subject: [PATCH 06/13] chore(internal): bump pinned h11 dep
---
requirements-dev.lock | 4 ++--
requirements.lock | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 7866f549..94875b2e 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -48,9 +48,9 @@ filelock==3.12.4
frozenlist==1.6.2
# via aiohttp
# via aiosignal
-h11==0.14.0
+h11==0.16.0
# via httpcore
-httpcore==1.0.2
+httpcore==1.0.9
# via httpx
httpx==0.28.1
# via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python
diff --git a/requirements.lock b/requirements.lock
index 2a8aeea9..b16bfc5e 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -36,9 +36,9 @@ exceptiongroup==1.2.2
frozenlist==1.6.2
# via aiohttp
# via aiosignal
-h11==0.14.0
+h11==0.16.0
# via httpcore
-httpcore==1.0.2
+httpcore==1.0.9
# via httpx
httpx==0.28.1
# via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python
From 1a899b66a484986672a380e405f09b1ae94b6310 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 9 Jul 2025 02:56:16 +0000
Subject: [PATCH 07/13] chore(package): mark python 3.13 as supported
---
pyproject.toml | 1 +
1 file changed, 1 insertion(+)
diff --git a/pyproject.toml b/pyproject.toml
index 9d1a9e36..40bb5b9d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -24,6 +24,7 @@ classifiers = [
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
"Operating System :: OS Independent",
"Operating System :: POSIX",
"Operating System :: MacOS",
From 569e473d422928597ccf762133d5e52ac9a8665a Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 10 Jul 2025 03:04:54 +0000
Subject: [PATCH 08/13] fix(parsing): correctly handle nested discriminated
unions
---
src/gradientai/_models.py | 13 ++++++-----
tests/test_models.py | 45 +++++++++++++++++++++++++++++++++++++++
2 files changed, 53 insertions(+), 5 deletions(-)
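Review note (not part of the commit): the root cause is that pydantic v2 moves `Annotated[...]` metadata off the field's type and onto `FieldInfo.metadata`, so `construct_type` never saw the `PropertyInfo(discriminator=...)` for a union nested behind a model field; the fix threads that metadata through explicitly. A minimal repro sketch, assuming pydantic v2 and the internal import paths the tests use:

```python
from typing import Union

from typing_extensions import Annotated, Literal

from gradientai._utils import PropertyInfo
from gradientai._models import BaseModel, construct_type


class Cat(BaseModel):
    kind: Literal["cat"]


class Dog(BaseModel):
    kind: Literal["dog"]


class Holder(BaseModel):
    # The discriminator annotation lands in FieldInfo.metadata, not on the
    # bare Union that construct_type used to receive -- which this patch fixes.
    pet: Annotated[Union[Cat, Dog], PropertyInfo(discriminator="kind")]


holder = construct_type(type_=Holder, value={"pet": {"kind": "dog"}})
assert isinstance(holder, Holder)
assert isinstance(holder.pet, Dog)
```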
diff --git a/src/gradientai/_models.py b/src/gradientai/_models.py
index 4f214980..528d5680 100644
--- a/src/gradientai/_models.py
+++ b/src/gradientai/_models.py
@@ -2,9 +2,10 @@
import os
import inspect
-from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast
+from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
from datetime import date, datetime
from typing_extensions import (
+ List,
Unpack,
Literal,
ClassVar,
@@ -366,7 +367,7 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object:
if type_ is None:
raise RuntimeError(f"Unexpected field type is None for {key}")
- return construct_type(value=value, type_=type_)
+ return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None))
def is_basemodel(type_: type) -> bool:
@@ -420,7 +421,7 @@ def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T:
return cast(_T, construct_type(value=value, type_=type_))
-def construct_type(*, value: object, type_: object) -> object:
+def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object:
"""Loose coercion to the expected type with construction of nested values.
If the given value does not match the expected type then it is returned as-is.
@@ -438,8 +439,10 @@ def construct_type(*, value: object, type_: object) -> object:
type_ = type_.__value__ # type: ignore[unreachable]
# unwrap `Annotated[T, ...]` -> `T`
- if is_annotated_type(type_):
- meta: tuple[Any, ...] = get_args(type_)[1:]
+ if metadata is not None:
+ meta: tuple[Any, ...] = tuple(metadata)
+ elif is_annotated_type(type_):
+ meta = get_args(type_)[1:]
type_ = extract_type_arg(type_, 0)
else:
meta = tuple()
diff --git a/tests/test_models.py b/tests/test_models.py
index 28aff1f3..3a857584 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -889,3 +889,48 @@ class ModelB(BaseModel):
)
assert isinstance(m, ModelB)
+
+
+def test_nested_discriminated_union() -> None:
+ class InnerType1(BaseModel):
+ type: Literal["type_1"]
+
+ class InnerModel(BaseModel):
+ inner_value: str
+
+ class InnerType2(BaseModel):
+ type: Literal["type_2"]
+ some_inner_model: InnerModel
+
+ class Type1(BaseModel):
+ base_type: Literal["base_type_1"]
+ value: Annotated[
+ Union[
+ InnerType1,
+ InnerType2,
+ ],
+ PropertyInfo(discriminator="type"),
+ ]
+
+ class Type2(BaseModel):
+ base_type: Literal["base_type_2"]
+
+ T = Annotated[
+ Union[
+ Type1,
+ Type2,
+ ],
+ PropertyInfo(discriminator="base_type"),
+ ]
+
+ model = construct_type(
+ type_=T,
+ value={
+ "base_type": "base_type_1",
+ "value": {
+ "type": "type_2",
+ },
+ },
+ )
+ assert isinstance(model, Type1)
+ assert isinstance(model.value, InnerType2)
From edec861a2f84f94bbfc586dc63a74e404d7d655f Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 10 Jul 2025 22:02:46 +0000
Subject: [PATCH 09/13] codegen metadata
---
.stats.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.stats.yml b/.stats.yml
index 1787fb3e..89f80bc1 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 77
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-391afaae764eb758523b67805cb47ae3bc319dc119d83414afdd66f123ceaf5c.yml
openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208
-config_hash: 6edaff3557194ba8897d14f7ca74589c
+config_hash: 0bd094d86a010f7cbd5eb22ef548a29f
From 6fbe83b11a9e3dbb40cf7f9f627abbbd086ee24a Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 11 Jul 2025 03:10:30 +0000
Subject: [PATCH 10/13] chore(readme): fix version rendering on pypi
---
README.md | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index a4f699db..0aff8478 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
# GradientAI Python API library
-[![PyPI version]()](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/)
+
+[![PyPI version](<https://img.shields.io/pypi/v/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python.svg?label=pypi%20(stable)>)](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/)
The GradientAI Python library provides convenient access to the GradientAI REST API from any Python 3.8+
application. The library includes type definitions for all request params and response fields,
From 507a342fbcc7c801ba36708e56ea2d2a28a1a392 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Sat, 12 Jul 2025 02:16:35 +0000
Subject: [PATCH 11/13] fix(client): don't send Content-Type header on GET
requests
---
pyproject.toml | 2 +-
src/gradientai/_base_client.py | 11 +++++++++--
tests/test_client.py | 4 ++--
3 files changed, 12 insertions(+), 5 deletions(-)
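Review note (not part of the commit): the behavioural contract here is that GET requests carry neither a JSON/file body nor a `Content-Type` header, while body-capable methods are unchanged. A standalone httpx sketch of that contract (not the SDK's `_build_request` itself):

```python
import httpx

_client = httpx.Client()


def build(method: str, json_data: object = None) -> httpx.Request:
    headers = {"Content-Type": "application/json"}
    kwargs = {}
    if method.lower() != "get":
        # Body-capable methods keep their JSON payload and Content-Type.
        kwargs["json"] = json_data
    else:
        # GET: drop the body and the now-misleading Content-Type header.
        headers.pop("Content-Type", None)
    return _client.build_request(method, "https://example.test/foo", headers=headers, **kwargs)


assert "content-type" not in build("get").headers
assert build("post", {"a": 1}).headers["content-type"] == "application/json"
```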
diff --git a/pyproject.toml b/pyproject.toml
index 40bb5b9d..a70bba90 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -39,7 +39,7 @@ Homepage = "https://github.com/digitalocean/gradientai-python"
Repository = "https://github.com/digitalocean/gradientai-python"
[project.optional-dependencies]
-aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"]
+aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.8"]
[tool.rye]
managed = true
diff --git a/src/gradientai/_base_client.py b/src/gradientai/_base_client.py
index 6dce600b..379c27d1 100644
--- a/src/gradientai/_base_client.py
+++ b/src/gradientai/_base_client.py
@@ -529,6 +529,15 @@ def _build_request(
# work around https://github.com/encode/httpx/discussions/2880
kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
+ is_body_allowed = options.method.lower() != "get"
+
+ if is_body_allowed:
+ kwargs["json"] = json_data if is_given(json_data) else None
+ kwargs["files"] = files
+ else:
+ headers.pop("Content-Type", None)
+ kwargs.pop("data", None)
+
# TODO: report this error to httpx
return self._client.build_request( # pyright: ignore[reportUnknownMemberType]
headers=headers,
@@ -540,8 +549,6 @@ def _build_request(
# so that passing a `TypedDict` doesn't cause an error.
# https://github.com/microsoft/pyright/issues/3526#event-6715453066
params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None,
- json=json_data if is_given(json_data) else None,
- files=files,
**kwargs,
)
diff --git a/tests/test_client.py b/tests/test_client.py
index 5c16eb22..61013a0a 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -546,7 +546,7 @@ def test_request_extra_query(self) -> None:
def test_multipart_repeating_array(self, client: GradientAI) -> None:
request = client._build_request(
FinalRequestOptions.construct(
- method="get",
+ method="post",
url="/foo",
headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
json_data={"array": ["foo", "bar"]},
@@ -1548,7 +1548,7 @@ def test_request_extra_query(self) -> None:
def test_multipart_repeating_array(self, async_client: AsyncGradientAI) -> None:
request = async_client._build_request(
FinalRequestOptions.construct(
- method="get",
+ method="post",
url="/foo",
headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
json_data={"array": ["foo", "bar"]},
From 64ee5b449c0195288d0a1dc55d2725e8cdd6afcf Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 15 Jul 2025 02:16:15 +0000
Subject: [PATCH 12/13] feat: clean up environment call outs
---
README.md | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 0aff8478..2c739c6d 100644
--- a/README.md
+++ b/README.md
@@ -105,7 +105,6 @@ pip install --pre c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python[aiohttp]
Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`:
```python
-import os
import asyncio
from gradientai import DefaultAioHttpClient
from gradientai import AsyncGradientAI
@@ -113,7 +112,7 @@ from gradientai import AsyncGradientAI
async def main() -> None:
async with AsyncGradientAI(
- api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted
+ api_key="My API Key",
http_client=DefaultAioHttpClient(),
) as client:
completion = await client.chat.completions.create(
From 0d7da54e7a3188cc5de3fb798a9eba30e721127b Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 15 Jul 2025 02:17:07 +0000
Subject: [PATCH 13/13] release: 0.1.0-alpha.13
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 25 +++++++++++++++++++++++++
pyproject.toml | 2 +-
src/gradientai/_version.py | 2 +-
4 files changed, 28 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index fd0ccba9..000572ec 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.1.0-alpha.12"
+ ".": "0.1.0-alpha.13"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 555608d3..5aea5ed7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,30 @@
# Changelog
+## 0.1.0-alpha.13 (2025-07-15)
+
+Full Changelog: [v0.1.0-alpha.12...v0.1.0-alpha.13](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-alpha.12...v0.1.0-alpha.13)
+
+### Features
+
+* **api:** manual updates ([bd6fecc](https://github.com/digitalocean/gradientai-python/commit/bd6feccf97fa5877085783419f11dad04c57d700))
+* **api:** manual updates ([c2b96ce](https://github.com/digitalocean/gradientai-python/commit/c2b96ce3d95cc9b74bffd8d6a499927eefd23b14))
+* **api:** share chat completion chunk model between chat and agent.chat ([d67371f](https://github.com/digitalocean/gradientai-python/commit/d67371f9f4d0761ea03097820bc3e77654b4d2bf))
+* clean up environment call outs ([64ee5b4](https://github.com/digitalocean/gradientai-python/commit/64ee5b449c0195288d0a1dc55d2725e8cdd6afcf))
+
+
+### Bug Fixes
+
+* **client:** don't send Content-Type header on GET requests ([507a342](https://github.com/digitalocean/gradientai-python/commit/507a342fbcc7c801ba36708e56ea2d2a28a1a392))
+* **parsing:** correctly handle nested discriminated unions ([569e473](https://github.com/digitalocean/gradientai-python/commit/569e473d422928597ccf762133d5e52ac9a8665a))
+
+
+### Chores
+
+* **internal:** bump pinned h11 dep ([6f4e960](https://github.com/digitalocean/gradientai-python/commit/6f4e960b6cb838cbf5e50301375fcb4b60a2cfb3))
+* **internal:** codegen related update ([1df657d](https://github.com/digitalocean/gradientai-python/commit/1df657d9b384cb85d27fe839c0dab212a7773f8f))
+* **package:** mark python 3.13 as supported ([1a899b6](https://github.com/digitalocean/gradientai-python/commit/1a899b66a484986672a380e405f09b1ae94b6310))
+* **readme:** fix version rendering on pypi ([6fbe83b](https://github.com/digitalocean/gradientai-python/commit/6fbe83b11a9e3dbb40cf7f9f627abbbd086ee24a))
+
## 0.1.0-alpha.12 (2025-07-02)
Full Changelog: [v0.1.0-alpha.11...v0.1.0-alpha.12](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-alpha.11...v0.1.0-alpha.12)
diff --git a/pyproject.toml b/pyproject.toml
index a70bba90..042566f4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python"
-version = "0.1.0-alpha.12"
+version = "0.1.0-alpha.13"
description = "The official Python library for GradientAI"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py
index 0084d0f3..e6299f17 100644
--- a/src/gradientai/_version.py
+++ b/src/gradientai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "gradientai"
-__version__ = "0.1.0-alpha.12" # x-release-please-version
+__version__ = "0.1.0-alpha.13" # x-release-please-version