From b370349a68d24b00854e3f54df50c86f2c29651b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 08:50:08 +0000 Subject: [PATCH 1/7] chore(internal): fix ruff target version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index b21336d9..348648c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -142,7 +142,7 @@ reportPrivateUsage = false [tool.ruff] line-length = 120 output-format = "grouped" -target-version = "py37" +target-version = "py38" [tool.ruff.format] docstring-code-format = true From ed70ab72ce3faecd7fb5070f429275518b7aa6f2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 18:44:42 +0000 Subject: [PATCH 2/7] feat(api): rename environment variables To match the ui and DO ecosystem --- .stats.yml | 2 +- README.md | 4 ++-- src/gradient/_client.py | 24 ++++++++++++------------ tests/test_client.py | 4 ++-- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9dda6e1d..b57aebbe 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 170 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-9aca3802735e1375125412aa28ac36bf2175144b8218610a73d2e7f775694dff.yml openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: de89a9c8fde0120577d2aca8be4ae027 +config_hash: 136e1973eb6297e6308a165594bd00a3 diff --git a/README.md b/README.md index ca6a00e8..ae742621 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ import os from gradient import Gradient client = Gradient( - api_key=os.environ.get("GRADIENT_API_KEY"), # This is the default and can be omitted + api_key=os.environ.get("DIGITALOCEAN_ACCESS_TOKEN"), # This is the default and can be omitted ) inference_client = Gradient( inference_key=os.environ.get( @@ -105,7 
+105,7 @@ import asyncio from gradient import AsyncGradient client = AsyncGradient( - api_key=os.environ.get("GRADIENT_API_KEY"), # This is the default and can be omitted + api_key=os.environ.get("DIGITALOCEAN_ACCESS_TOKEN"), # This is the default and can be omitted ) diff --git a/src/gradient/_client.py b/src/gradient/_client.py index c25e3976..2cc93743 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -96,20 +96,20 @@ def __init__( """Construct a new synchronous Gradient client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `GRADIENT_API_KEY` - - `inference_key` from `GRADIENT_INFERENCE_KEY` - - `agent_key` from `GRADIENT_AGENT_KEY` + - `api_key` from `DIGITALOCEAN_ACCESS_TOKEN` + - `inference_key` from `GRADIENT_MODEL_ACCESS_KEY` + - `agent_key` from `GRADIENT_AGENT_ACCESS_KEY` """ if api_key is None: - api_key = os.environ.get("GRADIENT_API_KEY") + api_key = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") self.api_key = api_key if inference_key is None: - inference_key = os.environ.get("GRADIENT_INFERENCE_KEY") + inference_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") self.inference_key = inference_key if agent_key is None: - agent_key = os.environ.get("GRADIENT_AGENT_KEY") + agent_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") self.agent_key = agent_key self._agent_endpoint = agent_endpoint @@ -364,20 +364,20 @@ def __init__( """Construct a new async AsyncGradient client instance. 
This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `GRADIENT_API_KEY` - - `inference_key` from `GRADIENT_INFERENCE_KEY` - - `agent_key` from `GRADIENT_AGENT_KEY` + - `api_key` from `DIGITALOCEAN_ACCESS_TOKEN` + - `inference_key` from `GRADIENT_MODEL_ACCESS_KEY` + - `agent_key` from `GRADIENT_AGENT_ACCESS_KEY` """ if api_key is None: - api_key = os.environ.get("GRADIENT_API_KEY") + api_key = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") self.api_key = api_key if inference_key is None: - inference_key = os.environ.get("GRADIENT_INFERENCE_KEY") + inference_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") self.inference_key = inference_key if agent_key is None: - agent_key = os.environ.get("GRADIENT_AGENT_KEY") + agent_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") self.agent_key = agent_key self._agent_endpoint = agent_endpoint diff --git a/tests/test_client.py b/tests/test_client.py index caf79355..4a48f4a0 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -403,7 +403,7 @@ def test_validate_headers(self) -> None: request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with update_env(**{"GRADIENT_API_KEY": Omit()}): + with update_env(**{"DIGITALOCEAN_ACCESS_TOKEN": Omit()}): client2 = Gradient( base_url=base_url, api_key=None, @@ -1405,7 +1405,7 @@ def test_validate_headers(self) -> None: request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with update_env(**{"GRADIENT_API_KEY": Omit()}): + with update_env(**{"DIGITALOCEAN_ACCESS_TOKEN": Omit()}): client2 = AsyncGradient( base_url=base_url, api_key=None, From 6853d0542055a29a70685cab67414e5612890c7d Mon Sep 17 00:00:00 2001 From: "stainless-sdks[bot]" <167585319+stainless-sdks[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 
15:09:49 -0400 Subject: [PATCH 3/7] fix(config): align environment variables with other DO tools and console (#40) (#41) Co-authored-by: Ben Batha --- README.md | 6 +- src/gradient/_client.py | 91 ++++++++-- tests/test_client.py | 358 ++++++++++++++++++++++++++++++---------- 3 files changed, 350 insertions(+), 105 deletions(-) diff --git a/README.md b/README.md index ae742621..41f16c3d 100644 --- a/README.md +++ b/README.md @@ -46,11 +46,11 @@ client = Gradient( ) inference_client = Gradient( inference_key=os.environ.get( - "GRADIENT_INFERENCE_KEY" + "GRADIENT_MODEL_ACCESS_KEY" ), # This is the default and can be omitted ) agent_client = Gradient( - agent_key=os.environ.get("GRADIENT_AGENT_KEY"), # This is the default and can be omitted + agent_key=os.environ.get("GRADIENT_AGENT_ACCESS_KEY"), # This is the default and can be omitted agent_endpoint="https://my-agent.agents.do-ai.run", ) @@ -92,7 +92,7 @@ print(agent_response.choices[0].message.content) While you can provide an `api_key`, `inference_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) -to add `GRADIENT_API_KEY="My API Key"`, `GRADIENT_INFERENCE_KEY="My INFERENCE Key"` to your `.env` file +to add `DIGITALOCEAN_ACCESS_TOKEN="My API Key"`, `GRADIENT_MODEL_ACCESS_KEY="My INFERENCE Key"` to your `.env` file so that your keys are not stored in source control. 
## Async usage diff --git a/src/gradient/_client.py b/src/gradient/_client.py index 2cc93743..a51a38db 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -32,7 +32,16 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, databases, inference, gpu_droplets, knowledge_bases + from .resources import ( + chat, + agents, + models, + regions, + databases, + inference, + gpu_droplets, + knowledge_bases, + ) from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.gpu_droplets import ( @@ -102,14 +111,24 @@ def __init__( """ if api_key is None: api_key = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") + # support for legacy environment variable + if api_key is None: + api_key = os.environ.get("GRADIENT_API_KEY") self.api_key = api_key if inference_key is None: inference_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") + # support for legacy environment variable + if inference_key is None: + inference_key = os.environ.get("GRADIENT_INFERENCE_KEY") + self.inference_key = inference_key if agent_key is None: agent_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") + # support for legacy environment variable + if agent_key is None: + agent_key = os.environ.get("GRADIENT_AGENT_KEY") self.agent_key = agent_key self._agent_endpoint = agent_endpoint @@ -226,7 +245,9 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: - if (self.api_key or self.agent_key or self.inference_key) and headers.get("Authorization"): + if (self.api_key or self.agent_key or self.inference_key) and headers.get( + "Authorization" + ): return if isinstance(custom_headers.get("Authorization"), Omit): return @@ -256,10 +277,14 @@ def copy( Create a new client instance re-using the same options given to the current client with optional overriding. 
""" if default_headers is not None and set_default_headers is not None: - raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") + raise ValueError( + "The `default_headers` and `set_default_headers` arguments are mutually exclusive" + ) if default_query is not None and set_default_query is not None: - raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") + raise ValueError( + "The `default_query` and `set_default_query` arguments are mutually exclusive" + ) headers = self._custom_headers if default_headers is not None: @@ -306,10 +331,14 @@ def _make_status_error( return _exceptions.BadRequestError(err_msg, response=response, body=body) if response.status_code == 401: - return _exceptions.AuthenticationError(err_msg, response=response, body=body) + return _exceptions.AuthenticationError( + err_msg, response=response, body=body + ) if response.status_code == 403: - return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + return _exceptions.PermissionDeniedError( + err_msg, response=response, body=body + ) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=body) @@ -318,13 +347,17 @@ def _make_status_error( return _exceptions.ConflictError(err_msg, response=response, body=body) if response.status_code == 422: - return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + return _exceptions.UnprocessableEntityError( + err_msg, response=response, body=body + ) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=body) if response.status_code >= 500: - return _exceptions.InternalServerError(err_msg, response=response, body=body) + return _exceptions.InternalServerError( + err_msg, response=response, body=body + ) return APIStatusError(err_msg, response=response, body=body) @@ -370,14 +403,24 @@ def __init__( """ if api_key is None: api_key = 
os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") + # support for legacy environment variable + if api_key is None: + api_key = os.environ.get("GRADIENT_API_KEY") self.api_key = api_key if inference_key is None: inference_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") + # support for legacy environment variable + if inference_key is None: + inference_key = os.environ.get("GRADIENT_INFERENCE_KEY") + self.api_key = api_key self.inference_key = inference_key if agent_key is None: agent_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") + # support for legacy environment variable + if agent_key is None: + agent_key = os.environ.get("GRADIENT_AGENT_KEY") self.agent_key = agent_key self._agent_endpoint = agent_endpoint @@ -494,7 +537,9 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: - if (self.api_key or self.agent_key or self.inference_key) and headers.get("Authorization"): + if (self.api_key or self.agent_key or self.inference_key) and headers.get( + "Authorization" + ): return if isinstance(custom_headers.get("Authorization"), Omit): return @@ -524,10 +569,14 @@ def copy( Create a new client instance re-using the same options given to the current client with optional overriding. 
""" if default_headers is not None and set_default_headers is not None: - raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") + raise ValueError( + "The `default_headers` and `set_default_headers` arguments are mutually exclusive" + ) if default_query is not None and set_default_query is not None: - raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") + raise ValueError( + "The `default_query` and `set_default_query` arguments are mutually exclusive" + ) headers = self._custom_headers if default_headers is not None: @@ -574,10 +623,14 @@ def _make_status_error( return _exceptions.BadRequestError(err_msg, response=response, body=body) if response.status_code == 401: - return _exceptions.AuthenticationError(err_msg, response=response, body=body) + return _exceptions.AuthenticationError( + err_msg, response=response, body=body + ) if response.status_code == 403: - return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) + return _exceptions.PermissionDeniedError( + err_msg, response=response, body=body + ) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=body) @@ -586,13 +639,17 @@ def _make_status_error( return _exceptions.ConflictError(err_msg, response=response, body=body) if response.status_code == 422: - return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) + return _exceptions.UnprocessableEntityError( + err_msg, response=response, body=body + ) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=body) if response.status_code >= 500: - return _exceptions.InternalServerError(err_msg, response=response, body=body) + return _exceptions.InternalServerError( + err_msg, response=response, body=body + ) return APIStatusError(err_msg, response=response, body=body) @@ -811,7 +868,9 @@ def knowledge_bases( 
AsyncKnowledgeBasesResourceWithStreamingResponse, ) - return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + return AsyncKnowledgeBasesResourceWithStreamingResponse( + self._client.knowledge_bases + ) @cached_property def models(self) -> models.AsyncModelsResourceWithStreamingResponse: diff --git a/tests/test_client.py b/tests/test_client.py index 4a48f4a0..8eb2c616 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -25,7 +25,11 @@ from gradient._types import Omit from gradient._models import BaseModel, FinalRequestOptions from gradient._streaming import Stream, AsyncStream -from gradient._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError +from gradient._exceptions import ( + APIStatusError, + APITimeoutError, + APIResponseValidationError, +) from gradient._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, @@ -55,7 +59,9 @@ def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float: def _get_open_connections(client: Gradient | AsyncGradient) -> int: transport = client._client._transport - assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport) + assert isinstance(transport, httpx.HTTPTransport) or isinstance( + transport, httpx.AsyncHTTPTransport + ) pool = transport._pool return len(pool._requests) @@ -72,7 +78,9 @@ class TestGradient: @pytest.mark.respx(base_url=base_url) def test_raw_response(self, respx_mock: MockRouter) -> None: - respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.post("/foo").mock( + return_value=httpx.Response(200, json={"foo": "bar"}) + ) response = self.client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 @@ -82,7 +90,11 @@ def test_raw_response(self, respx_mock: MockRouter) -> None: @pytest.mark.respx(base_url=base_url) def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None: respx_mock.post("/foo").mock( - 
return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}') + return_value=httpx.Response( + 200, + headers={"Content-Type": "application/binary"}, + content='{"foo": "bar"}', + ) ) response = self.client.post("/foo", cast_to=httpx.Response) @@ -216,9 +228,14 @@ def test_copy_signature(self) -> None: continue copy_param = copy_signature.parameters.get(name) - assert copy_param is not None, f"copy() signature is missing the {name} param" + assert ( + copy_param is not None + ), f"copy() signature is missing the {name} param" - @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") + @pytest.mark.skipif( + sys.version_info >= (3, 10), + reason="fails because of a memory leak that started from 3.12", + ) def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") @@ -243,7 +260,9 @@ def build_request(options: FinalRequestOptions) -> None: tracemalloc.stop() - def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None: + def add_leak( + leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff + ) -> None: if diff.count == 0: # Avoid false positives by considering only leaks (i.e. allocations that persist). 
return @@ -282,7 +301,9 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic raise AssertionError() def test_request_timeout(self) -> None: - request = self.client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = self.client._build_request( + FinalRequestOptions(method="get", url="/foo") + ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT @@ -318,7 +339,9 @@ def test_http_client_timeout_option(self) -> None: http_client=http_client, ) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = client._build_request( + FinalRequestOptions(method="get", url="/foo") + ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(None) @@ -333,7 +356,9 @@ def test_http_client_timeout_option(self) -> None: http_client=http_client, ) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = client._build_request( + FinalRequestOptions(method="get", url="/foo") + ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT @@ -348,7 +373,9 @@ def test_http_client_timeout_option(self) -> None: http_client=http_client, ) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = client._build_request( + FinalRequestOptions(method="get", url="/foo") + ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT # our default @@ -419,7 +446,9 @@ def test_validate_headers(self) -> None: client2._build_request(FinalRequestOptions(method="get", url="/foo")) request2 = client2._build_request( - FinalRequestOptions(method="get", url="/foo", headers={"Authorization": Omit()}) + FinalRequestOptions( + method="get", url="/foo", headers={"Authorization": Omit()} + ) ) assert request2.headers.get("Authorization") is None @@ -491,7 +520,9 
@@ def test_request_extra_headers(self) -> None: assert request.headers.get("X-Foo") == "Foo" # `extra_headers` takes priority over `default_headers` when keys clash - request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request( + request = self.client.with_options( + default_headers={"X-Bar": "true"} + )._build_request( FinalRequestOptions( method="post", url="/foo", @@ -548,7 +579,9 @@ def test_multipart_repeating_array(self, client: Gradient) -> None: FinalRequestOptions.construct( method="post", url="/foo", - headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, + headers={ + "Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82" + }, json_data={"array": ["foo", "bar"]}, files=[("foo.txt", b"hello world")], ) @@ -580,7 +613,9 @@ class Model1(BaseModel): class Model2(BaseModel): foo: str - respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.get("/foo").mock( + return_value=httpx.Response(200, json={"foo": "bar"}) + ) response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) @@ -596,7 +631,9 @@ class Model1(BaseModel): class Model2(BaseModel): foo: str - respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.get("/foo").mock( + return_value=httpx.Response(200, json={"foo": "bar"}) + ) response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) @@ -609,7 +646,9 @@ class Model2(BaseModel): assert response.foo == 1 @pytest.mark.respx(base_url=base_url) - def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None: + def test_non_application_json_content_type_for_json_data( + self, respx_mock: MockRouter + ) -> None: """ Response that sets Content-Type to something other than application/json but returns json data """ @@ -646,7 +685,10 @@ def 
test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"): client = Gradient( - api_key=api_key, inference_key=inference_key, agent_key=agent_key, _strict_response_validation=True + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) assert client.base_url == "http://localhost:5000/from/env/" @@ -779,7 +821,9 @@ def test_client_response_validation_error(self, respx_mock: MockRouter) -> None: class Model(BaseModel): foo: str - respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) + respx_mock.get("/foo").mock( + return_value=httpx.Response(200, json={"foo": {"invalid": True}}) + ) with pytest.raises(APIResponseValidationError) as exc: self.client.get("/foo", cast_to=Model) @@ -802,9 +846,13 @@ def test_default_stream_cls(self, respx_mock: MockRouter) -> None: class Model(BaseModel): name: str - respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.post("/foo").mock( + return_value=httpx.Response(200, json={"foo": "bar"}) + ) - stream = self.client.post("/foo", cast_to=Model, stream=True, stream_cls=Stream[Model]) + stream = self.client.post( + "/foo", cast_to=Model, stream=True, stream_cls=Stream[Model] + ) assert isinstance(stream, Stream) stream.response.close() @@ -813,7 +861,9 @@ def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: class Model(BaseModel): name: str - respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) + respx_mock.get("/foo").mock( + return_value=httpx.Response(200, text="my-custom-format") + ) strict_client = Gradient( base_url=base_url, @@ -859,7 +909,9 @@ class Model(BaseModel): ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) - def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: + def 
test_parse_retry_after_header( + self, remaining_retries: int, retry_after: str, timeout: float + ) -> None: client = Gradient( base_url=base_url, api_key=api_key, @@ -870,13 +922,21 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) - calculated = client._calculate_retry_timeout(remaining_retries, options, headers) + calculated = client._calculate_retry_timeout( + remaining_retries, options, headers + ) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch( + "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout + ) @pytest.mark.respx(base_url=base_url) - def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: Gradient) -> None: - respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) + def test_retrying_timeout_errors_doesnt_leak( + self, respx_mock: MockRouter, client: Gradient + ) -> None: + respx_mock.post("/chat/completions").mock( + side_effect=httpx.TimeoutException("Test timeout error") + ) with pytest.raises(APITimeoutError): client.chat.completions.with_streaming_response.create( @@ -891,9 +951,13 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien assert _get_open_connections(self.client) == 0 - @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch( + "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout + ) @pytest.mark.respx(base_url=base_url) - def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: Gradient) -> None: + def test_retrying_status_errors_doesnt_leak( + self, respx_mock: MockRouter, client: Gradient 
+ ) -> None: respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): @@ -909,7 +973,9 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch( + "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout + ) @pytest.mark.respx(base_url=base_url) @pytest.mark.parametrize("failure_mode", ["status", "exception"]) def test_retries_taken( @@ -945,10 +1011,15 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: ) assert response.retries_taken == failures_before_success - assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success + assert ( + int(response.http_request.headers.get("x-stainless-retry-count")) + == failures_before_success + ) @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch( + "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout + ) @pytest.mark.respx(base_url=base_url) def test_omit_retry_count_header( self, client: Gradient, failures_before_success: int, respx_mock: MockRouter @@ -977,10 +1048,14 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: extra_headers={"x-stainless-retry-count": Omit()}, ) - assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 + assert ( + len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 + ) @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch( + "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout + 
) @pytest.mark.respx(base_url=base_url) def test_overwrite_retry_count_header( self, client: Gradient, failures_before_success: int, respx_mock: MockRouter @@ -1037,11 +1112,17 @@ def test_default_client_creation(self) -> None: def test_follow_redirects(self, respx_mock: MockRouter) -> None: # Test that the default follow_redirects=True allows following redirects respx_mock.post("/redirect").mock( - return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + return_value=httpx.Response( + 302, headers={"Location": f"{base_url}/redirected"} + ) + ) + respx_mock.get("/redirected").mock( + return_value=httpx.Response(200, json={"status": "ok"}) ) - respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) - response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + response = self.client.post( + "/redirect", body={"key": "value"}, cast_to=httpx.Response + ) assert response.status_code == 200 assert response.json() == {"status": "ok"} @@ -1049,12 +1130,17 @@ def test_follow_redirects(self, respx_mock: MockRouter) -> None: def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: # Test that follow_redirects=False prevents following redirects respx_mock.post("/redirect").mock( - return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + return_value=httpx.Response( + 302, headers={"Location": f"{base_url}/redirected"} + ) ) with pytest.raises(APIStatusError) as exc_info: self.client.post( - "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + "/redirect", + body={"key": "value"}, + options={"follow_redirects": False}, + cast_to=httpx.Response, ) assert exc_info.value.response.status_code == 302 @@ -1073,7 +1159,9 @@ class TestAsyncGradient: @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_raw_response(self, respx_mock: MockRouter) -> None: - 
respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.post("/foo").mock( + return_value=httpx.Response(200, json={"foo": "bar"}) + ) response = await self.client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 @@ -1084,7 +1172,11 @@ async def test_raw_response(self, respx_mock: MockRouter) -> None: @pytest.mark.asyncio async def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None: respx_mock.post("/foo").mock( - return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}') + return_value=httpx.Response( + 200, + headers={"Content-Type": "application/binary"}, + content='{"foo": "bar"}', + ) ) response = await self.client.post("/foo", cast_to=httpx.Response) @@ -1218,9 +1310,14 @@ def test_copy_signature(self) -> None: continue copy_param = copy_signature.parameters.get(name) - assert copy_param is not None, f"copy() signature is missing the {name} param" + assert ( + copy_param is not None + ), f"copy() signature is missing the {name} param" - @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") + @pytest.mark.skipif( + sys.version_info >= (3, 10), + reason="fails because of a memory leak that started from 3.12", + ) def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") @@ -1245,7 +1342,9 @@ def build_request(options: FinalRequestOptions) -> None: tracemalloc.stop() - def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None: + def add_leak( + leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff + ) -> None: if diff.count == 0: # Avoid false positives by considering only leaks (i.e. allocations that persist). 
return @@ -1284,7 +1383,9 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic raise AssertionError() async def test_request_timeout(self) -> None: - request = self.client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = self.client._build_request( + FinalRequestOptions(method="get", url="/foo") + ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT @@ -1320,7 +1421,9 @@ async def test_http_client_timeout_option(self) -> None: http_client=http_client, ) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = client._build_request( + FinalRequestOptions(method="get", url="/foo") + ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(None) @@ -1335,7 +1438,9 @@ async def test_http_client_timeout_option(self) -> None: http_client=http_client, ) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = client._build_request( + FinalRequestOptions(method="get", url="/foo") + ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT @@ -1350,7 +1455,9 @@ async def test_http_client_timeout_option(self) -> None: http_client=http_client, ) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = client._build_request( + FinalRequestOptions(method="get", url="/foo") + ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT # our default @@ -1421,7 +1528,9 @@ def test_validate_headers(self) -> None: client2._build_request(FinalRequestOptions(method="get", url="/foo")) request2 = client2._build_request( - FinalRequestOptions(method="get", url="/foo", headers={"Authorization": Omit()}) + FinalRequestOptions( + method="get", url="/foo", headers={"Authorization": Omit()} + ) ) assert 
request2.headers.get("Authorization") is None @@ -1493,7 +1602,9 @@ def test_request_extra_headers(self) -> None: assert request.headers.get("X-Foo") == "Foo" # `extra_headers` takes priority over `default_headers` when keys clash - request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request( + request = self.client.with_options( + default_headers={"X-Bar": "true"} + )._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1550,7 +1661,9 @@ def test_multipart_repeating_array(self, async_client: AsyncGradient) -> None: FinalRequestOptions.construct( method="post", url="/foo", - headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, + headers={ + "Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82" + }, json_data={"array": ["foo", "bar"]}, files=[("foo.txt", b"hello world")], ) @@ -1582,9 +1695,13 @@ class Model1(BaseModel): class Model2(BaseModel): foo: str - respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.get("/foo").mock( + return_value=httpx.Response(200, json={"foo": "bar"}) + ) - response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = await self.client.get( + "/foo", cast_to=cast(Any, Union[Model1, Model2]) + ) assert isinstance(response, Model2) assert response.foo == "bar" @@ -1598,20 +1715,28 @@ class Model1(BaseModel): class Model2(BaseModel): foo: str - respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.get("/foo").mock( + return_value=httpx.Response(200, json={"foo": "bar"}) + ) - response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = await self.client.get( + "/foo", cast_to=cast(Any, Union[Model1, Model2]) + ) assert isinstance(response, Model2) assert response.foo == "bar" respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) - response = await 
self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = await self.client.get( + "/foo", cast_to=cast(Any, Union[Model1, Model2]) + ) assert isinstance(response, Model1) assert response.foo == 1 @pytest.mark.respx(base_url=base_url) - async def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None: + async def test_non_application_json_content_type_for_json_data( + self, respx_mock: MockRouter + ) -> None: """ Response that sets Content-Type to something other than application/json but returns json data """ @@ -1648,7 +1773,10 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"): client = AsyncGradient( - api_key=api_key, inference_key=inference_key, agent_key=agent_key, _strict_response_validation=True + api_key=api_key, + inference_key=inference_key, + agent_key=agent_key, + _strict_response_validation=True, ) assert client.base_url == "http://localhost:5000/from/env/" @@ -1779,11 +1907,15 @@ async def test_client_context_manager(self) -> None: @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio - async def test_client_response_validation_error(self, respx_mock: MockRouter) -> None: + async def test_client_response_validation_error( + self, respx_mock: MockRouter + ) -> None: class Model(BaseModel): foo: str - respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) + respx_mock.get("/foo").mock( + return_value=httpx.Response(200, json={"foo": {"invalid": True}}) + ) with pytest.raises(APIResponseValidationError) as exc: await self.client.get("/foo", cast_to=Model) @@ -1807,19 +1939,27 @@ async def test_default_stream_cls(self, respx_mock: MockRouter) -> None: class Model(BaseModel): name: str - respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) + respx_mock.post("/foo").mock( + return_value=httpx.Response(200, json={"foo": "bar"}) + 
) - stream = await self.client.post("/foo", cast_to=Model, stream=True, stream_cls=AsyncStream[Model]) + stream = await self.client.post( + "/foo", cast_to=Model, stream=True, stream_cls=AsyncStream[Model] + ) assert isinstance(stream, AsyncStream) await stream.response.aclose() @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio - async def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: + async def test_received_text_for_expected_json( + self, respx_mock: MockRouter + ) -> None: class Model(BaseModel): name: str - respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) + respx_mock.get("/foo").mock( + return_value=httpx.Response(200, text="my-custom-format") + ) strict_client = AsyncGradient( base_url=base_url, @@ -1866,7 +2006,9 @@ class Model(BaseModel): ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) @pytest.mark.asyncio - async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: + async def test_parse_retry_after_header( + self, remaining_retries: int, retry_after: str, timeout: float + ) -> None: client = AsyncGradient( base_url=base_url, api_key=api_key, @@ -1877,15 +2019,21 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) - calculated = client._calculate_retry_timeout(remaining_retries, options, headers) + calculated = client._calculate_retry_timeout( + remaining_retries, options, headers + ) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch( + "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout + ) @pytest.mark.respx(base_url=base_url) async def 
test_retrying_timeout_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradient ) -> None: - respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) + respx_mock.post("/chat/completions").mock( + side_effect=httpx.TimeoutException("Test timeout error") + ) with pytest.raises(APITimeoutError): await async_client.chat.completions.with_streaming_response.create( @@ -1900,7 +2048,9 @@ async def test_retrying_timeout_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 - @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch( + "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout + ) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradient @@ -1920,7 +2070,9 @@ async def test_retrying_status_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch( + "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout + ) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) @@ -1957,14 +2109,22 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: ) assert response.retries_taken == failures_before_success - assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success + assert ( + int(response.http_request.headers.get("x-stainless-retry-count")) + == failures_before_success + ) @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch( + "gradient._base_client.BaseClient._calculate_retry_timeout", 
_low_retry_timeout + ) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_omit_retry_count_header( - self, async_client: AsyncGradient, failures_before_success: int, respx_mock: MockRouter + self, + async_client: AsyncGradient, + failures_before_success: int, + respx_mock: MockRouter, ) -> None: client = async_client.with_options(max_retries=4) @@ -1990,14 +2150,21 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: extra_headers={"x-stainless-retry-count": Omit()}, ) - assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 + assert ( + len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 + ) @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch( + "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout + ) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_overwrite_retry_count_header( - self, async_client: AsyncGradient, failures_before_success: int, respx_mock: MockRouter + self, + async_client: AsyncGradient, + failures_before_success: int, + respx_mock: MockRouter, ) -> None: client = async_client.with_options(max_retries=4) @@ -2031,7 +2198,8 @@ def test_get_platform(self) -> None: # # Since nest_asyncio.apply() is global and cannot be un-applied, this # test is run in a separate process to avoid affecting other tests. 
- test_code = dedent(""" + test_code = dedent( + """ import asyncio import nest_asyncio import threading @@ -2047,7 +2215,8 @@ async def test_main() -> None: nest_asyncio.apply() asyncio.run(test_main()) - """) + """ + ) with subprocess.Popen( [sys.executable, "-c", test_code], text=True, @@ -2059,18 +2228,24 @@ async def test_main() -> None: return_code = process.poll() if return_code is not None: if return_code != 0: - raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") + raise AssertionError( + "calling get_platform using asyncify resulted in a non-zero exit code" + ) # success break if time.monotonic() - start_time > timeout: process.kill() - raise AssertionError("calling get_platform using asyncify resulted in a hung process") + raise AssertionError( + "calling get_platform using asyncify resulted in a hung process" + ) time.sleep(0.1) - async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + async def test_proxy_environment_variables( + self, monkeypatch: pytest.MonkeyPatch + ) -> None: # Test that the proxy environment variables are set correctly monkeypatch.setenv("HTTPS_PROXY", "https://example.org") @@ -2096,11 +2271,17 @@ async def test_default_client_creation(self) -> None: async def test_follow_redirects(self, respx_mock: MockRouter) -> None: # Test that the default follow_redirects=True allows following redirects respx_mock.post("/redirect").mock( - return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + return_value=httpx.Response( + 302, headers={"Location": f"{base_url}/redirected"} + ) + ) + respx_mock.get("/redirected").mock( + return_value=httpx.Response(200, json={"status": "ok"}) ) - respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) - response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + response = await self.client.post( + "/redirect", body={"key": 
"value"}, cast_to=httpx.Response + ) assert response.status_code == 200 assert response.json() == {"status": "ok"} @@ -2108,12 +2289,17 @@ async def test_follow_redirects(self, respx_mock: MockRouter) -> None: async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: # Test that follow_redirects=False prevents following redirects respx_mock.post("/redirect").mock( - return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) + return_value=httpx.Response( + 302, headers={"Location": f"{base_url}/redirected"} + ) ) with pytest.raises(APIStatusError) as exc_info: await self.client.post( - "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response + "/redirect", + body={"key": "value"}, + options={"follow_redirects": False}, + cast_to=httpx.Response, ) assert exc_info.value.response.status_code == 302 From b74952e665a92a50937f475ef68331d85d96e018 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 19:42:50 +0000 Subject: [PATCH 4/7] feat(api): make kwargs match the env vars --- .stats.yml | 2 +- README.md | 12 +- src/gradient/_client.py | 222 +++++++++++++++--------- tests/conftest.py | 20 +-- tests/test_client.py | 374 ++++++++++++++++++++-------------------- 5 files changed, 343 insertions(+), 287 deletions(-) diff --git a/.stats.yml b/.stats.yml index b57aebbe..7b81dd11 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 170 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-9aca3802735e1375125412aa28ac36bf2175144b8218610a73d2e7f775694dff.yml openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: 136e1973eb6297e6308a165594bd00a3 +config_hash: 99e3cd5dde0beb796f4547410869f726 diff --git a/README.md b/README.md index 41f16c3d..d0eff5a2 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,9 @@ import os from gradient import 
Gradient client = Gradient( - api_key=os.environ.get("DIGITALOCEAN_ACCESS_TOKEN"), # This is the default and can be omitted + access_token=os.environ.get( + "DIGITALOCEAN_ACCESS_TOKEN" + ), # This is the default and can be omitted ) inference_client = Gradient( inference_key=os.environ.get( @@ -90,7 +92,7 @@ print("--- Agent Inference") print(agent_response.choices[0].message.content) ``` -While you can provide an `api_key`, `inference_key` keyword argument, +While you can provide an `access_token`, `model_access_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) to add `DIGITALOCEAN_ACCESS_TOKEN="My API Key"`, `GRADIENT_MODEL_ACCESS_KEY="My INFERENCE Key"` to your `.env` file so that your keys are not stored in source control. @@ -105,7 +107,9 @@ import asyncio from gradient import AsyncGradient client = AsyncGradient( - api_key=os.environ.get("DIGITALOCEAN_ACCESS_TOKEN"), # This is the default and can be omitted + access_token=os.environ.get( + "DIGITALOCEAN_ACCESS_TOKEN" + ), # This is the default and can be omitted ) @@ -148,7 +152,7 @@ from gradient import AsyncGradient async def main() -> None: async with AsyncGradient( - api_key="My API Key", + access_token="My Access Token", http_client=DefaultAioHttpClient(), ) as client: completion = await client.chat.completions.create( diff --git a/src/gradient/_client.py b/src/gradient/_client.py index a51a38db..bb89769a 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -71,18 +71,23 @@ class Gradient(SyncAPIClient): # client options - api_key: str | None - inference_key: str | None - agent_key: str | None + access_token: str | None + model_access_key: str | None + agent_access_key: str | None _agent_endpoint: str | None + inference_endpoint: str | None def __init__( self, *, - api_key: str | None = None, - inference_key: str | None = None, - agent_key: str | None = None, + api_key: str | None = None, # deprecated, use `access_token` instead + 
inference_key: str | None = None, # deprecated, use `model_access_key` instead + agent_key: str | None = None, # deprecated, use `agent_access_key` instead + access_token: str | None = None, + model_access_key: str | None = None, + agent_access_key: str | None = None, agent_endpoint: str | None = None, + inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -105,34 +110,45 @@ def __init__( """Construct a new synchronous Gradient client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `DIGITALOCEAN_ACCESS_TOKEN` - - `inference_key` from `GRADIENT_MODEL_ACCESS_KEY` - - `agent_key` from `GRADIENT_AGENT_ACCESS_KEY` + - `access_token` from `DIGITALOCEAN_ACCESS_TOKEN` + - `model_access_key` from `GRADIENT_MODEL_ACCESS_KEY` + - `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY` """ - if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") - # support for legacy environment variable - if api_key is None: - api_key = os.environ.get("GRADIENT_API_KEY") - self.api_key = api_key - - if inference_key is None: - inference_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") - # support for legacy environment variable - if inference_key is None: - inference_key = os.environ.get("GRADIENT_INFERENCE_KEY") - - self.inference_key = inference_key - - if agent_key is None: - agent_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") - # support for legacy environment variable - if agent_key is None: - agent_key = os.environ.get("GRADIENT_AGENT_KEY") - self.agent_key = agent_key + if access_token is None: + if api_key is not None: + access_token = api_key + else: + access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") + # support for legacy environment variable + if access_token is None: + access_token = os.environ.get("GRADIENT_API_KEY") 
+ self.access_token = access_token + + + if model_access_key is None: + if inference_key is not None: + model_access_key = inference_key + else: + model_access_key = os.environ.get("GRADIENT_INFERENCE_KEY") + # support for legacy environment variable + if model_access_key is None: + model_access_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") + self.model_access_key = model_access_key + + if agent_access_key is None: + if agent_key is not None: + agent_access_key = agent_key + else: + agent_access_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") + # support for legacy environment variable + if agent_access_key is None: + agent_access_key = os.environ.get("GRADIENT_AGENT_KEY") + self.agent_access_key = agent_access_key self._agent_endpoint = agent_endpoint + self.inference_endpoint = inference_endpoint + if base_url is None: base_url = os.environ.get("GRADIENT_BASE_URL") self._base_url_overridden = base_url is not None @@ -229,10 +245,10 @@ def qs(self) -> Querystring: @property @override def auth_headers(self) -> dict[str, str]: - api_key = self.api_key - if api_key is None: + access_token = self.access_token + if access_token is None: return {} - return {"Authorization": f"Bearer {api_key}"} + return {"Authorization": f"Bearer {access_token}"} @property @override @@ -245,24 +261,28 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: - if (self.api_key or self.agent_key or self.inference_key) and headers.get( - "Authorization" - ): + if ( + self.access_token or self.agent_access_key or self.model_access_key + ) and headers.get("Authorization"): return if isinstance(custom_headers.get("Authorization"), Omit): return raise TypeError( - '"Could not resolve authentication method. Expected api_key, agent_key, or inference_key to be set. Or for the `Authorization` headers to be explicitly omitted"' + '"Could not resolve authentication method. 
Expected access_token, agent_access_key, or model_access_key to be set. Or for the `Authorization` headers to be explicitly omitted"' ) def copy( self, *, - api_key: str | None = None, - inference_key: str | None = None, - agent_key: str | None = None, + api_key: str | None = None, # deprecated, use `access_token` instead + inference_key: str | None = None, # deprecated, use `model_access_key` instead + agent_key: str | None = None, # deprecated, use `agent_access_key` instead + access_token: str | None = None, + model_access_key: str | None = None, + agent_access_key: str | None = None, agent_endpoint: str | None = None, + inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.Client | None = None, @@ -300,10 +320,11 @@ def copy( http_client = http_client or self._client client = self.__class__( - api_key=api_key or self.api_key, - inference_key=inference_key or self.inference_key, - agent_key=agent_key or self.agent_key, - agent_endpoint=agent_endpoint or self._agent_endpoint, + access_token=access_token or api_key or self.access_token, + model_access_key=model_access_key or inference_key or self.model_access_key, + agent_access_key=agent_access_key or agent_key or self.agent_access_key, + agent_endpoint=agent_endpoint or self.agent_endpoint, + inference_endpoint=inference_endpoint or self.inference_endpoint, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, @@ -363,18 +384,23 @@ def _make_status_error( class AsyncGradient(AsyncAPIClient): # client options - api_key: str | None - inference_key: str | None - agent_key: str | None + access_token: str | None + model_access_key: str | None + agent_access_key: str | None _agent_endpoint: str | None + inference_endpoint: str | None def __init__( self, *, - api_key: str | None = None, - inference_key: str | None = None, - agent_key: str | None 
= None, + api_key: str | None = None, # deprecated, use `access_token` instead + inference_key: str | None = None, # deprecated, use `model_access_key` instead + agent_key: str | None = None, # deprecated, use `agent_access_key` instead + access_token: str | None = None, + model_access_key: str | None = None, + agent_access_key: str | None = None, agent_endpoint: str | None = None, + inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, @@ -397,34 +423,45 @@ def __init__( """Construct a new async AsyncGradient client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - - `api_key` from `DIGITALOCEAN_ACCESS_TOKEN` - - `inference_key` from `GRADIENT_MODEL_ACCESS_KEY` - - `agent_key` from `GRADIENT_AGENT_ACCESS_KEY` + - `access_token` from `DIGITALOCEAN_ACCESS_TOKEN` + - `model_access_key` from `GRADIENT_MODEL_ACCESS_KEY` + - `agent_access_key` from `GRADIENT_AGENT_ACCESS_KEY` """ - if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") - # support for legacy environment variable - if api_key is None: - api_key = os.environ.get("GRADIENT_API_KEY") - self.api_key = api_key - - if inference_key is None: - inference_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") - # support for legacy environment variable - if inference_key is None: - inference_key = os.environ.get("GRADIENT_INFERENCE_KEY") - self.api_key = api_key - self.inference_key = inference_key - - if agent_key is None: - agent_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") - # support for legacy environment variable - if agent_key is None: - agent_key = os.environ.get("GRADIENT_AGENT_KEY") - self.agent_key = agent_key + if access_token is None: + if api_key is not None: + access_token = api_key + else: + access_token = os.environ.get("DIGITALOCEAN_ACCESS_TOKEN") + # support for 
legacy environment variable + if access_token is None: + access_token = os.environ.get("GRADIENT_API_KEY") + self.access_token = access_token + + + if model_access_key is None: + if inference_key is not None: + model_access_key = inference_key + else: + model_access_key = os.environ.get("GRADIENT_INFERENCE_KEY") + # support for legacy environment variable + if model_access_key is None: + model_access_key = os.environ.get("GRADIENT_MODEL_ACCESS_KEY") + self.model_access_key = model_access_key + + if agent_access_key is None: + if agent_key is not None: + agent_access_key = agent_key + else: + agent_access_key = os.environ.get("GRADIENT_AGENT_ACCESS_KEY") + # support for legacy environment variable + if agent_access_key is None: + agent_access_key = os.environ.get("GRADIENT_AGENT_KEY") + self.agent_access_key = agent_access_key self._agent_endpoint = agent_endpoint + self.inference_endpoint = inference_endpoint + if base_url is None: base_url = os.environ.get("GRADIENT_BASE_URL") self._base_url_overridden = base_url is not None @@ -521,10 +558,10 @@ def qs(self) -> Querystring: @property @override def auth_headers(self) -> dict[str, str]: - api_key = self.api_key - if api_key is None: + access_token = self.access_token + if access_token is None: return {} - return {"Authorization": f"Bearer {api_key}"} + return {"Authorization": f"Bearer {access_token}"} @property @override @@ -537,24 +574,28 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: - if (self.api_key or self.agent_key or self.inference_key) and headers.get( - "Authorization" - ): + if ( + self.access_token or self.agent_access_key or self.model_access_key + ) and headers.get("Authorization"): return if isinstance(custom_headers.get("Authorization"), Omit): return raise TypeError( - '"Could not resolve authentication method. Expected api_key, agent_key, or inference_key to be set. 
Or for the `Authorization` headers to be explicitly omitted"' + '"Could not resolve authentication method. Expected access_token, agent_access_key, or model_access_key to be set. Or for the `Authorization` headers to be explicitly omitted"' ) def copy( self, *, - api_key: str | None = None, - inference_key: str | None = None, - agent_key: str | None = None, + api_key: str | None = None, # deprecated, use `access_token` instead + inference_key: str | None = None, # deprecated, use `model_access_key` instead + agent_key: str | None = None, # deprecated, use `agent_access_key` instead agent_endpoint: str | None = None, + access_token: str | None = None, + model_access_key: str | None = None, + agent_access_key: str | None = None, + inference_endpoint: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.AsyncClient | None = None, @@ -592,10 +633,23 @@ def copy( http_client = http_client or self._client client = self.__class__( +<<<<<<< HEAD api_key=api_key or self.api_key, inference_key=inference_key or self.inference_key, agent_key=agent_key or self.agent_key, agent_endpoint=agent_endpoint or self._agent_endpoint, +||||||| eb1dcf7 + api_key=api_key or self.api_key, + inference_key=inference_key or self.inference_key, + agent_key=agent_key or self.agent_key, + agent_domain=agent_domain or self.agent_domain, +======= + access_token=access_token or self.access_token, + model_access_key=model_access_key or self.model_access_key, + agent_access_key=agent_access_key or self.agent_access_key, + agent_endpoint=agent_endpoint or self.agent_endpoint, + inference_endpoint=inference_endpoint or self.inference_endpoint, +>>>>>>> origin/generated--merge-conflict base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, diff --git a/tests/conftest.py b/tests/conftest.py index fecfc779..bc2aa92e 100644 --- a/tests/conftest.py +++ 
b/tests/conftest.py @@ -45,10 +45,9 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -api_key = "My API Key" -inference_key = "My Inference Key" -agent_key = "My Agent Key" -agent_endpoint = "https://inference.do-ai.run" +access_token = "My Access Token" +model_access_key = "My Model Access Key" +agent_access_key = "My Agent Access Key" @pytest.fixture(scope="session") @@ -59,10 +58,9 @@ def client(request: FixtureRequest) -> Iterator[Gradient]: with Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, - agent_endpoint=agent_endpoint, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=strict, ) as client: yield client @@ -90,9 +88,9 @@ async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradient]: async with AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=strict, http_client=http_client, ) as client: diff --git a/tests/test_client.py b/tests/test_client.py index 8eb2c616..347c89aa 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -42,9 +42,9 @@ from .utils import update_env base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -api_key = "My API Key" -inference_key = "My Inference Key" -agent_key = "My Agent Key" +access_token = "My Access Token" +model_access_key = "My Model Access Key" +agent_access_key = "My Agent Access Key" def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]: @@ -70,9 +70,9 @@ def _get_open_connections(client: Gradient | AsyncGradient) -> int: class TestGradient: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, 
+ access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) @@ -106,17 +106,17 @@ def test_copy(self) -> None: copied = self.client.copy() assert id(copied) != id(self.client) - copied = self.client.copy(api_key="another My API Key") - assert copied.api_key == "another My API Key" - assert self.client.api_key == "My API Key" + copied = self.client.copy(access_token="another My Access Token") + assert copied.access_token == "another My Access Token" + assert self.client.access_token == "My Access Token" - copied = self.client.copy(inference_key="another My Inference Key") - assert copied.inference_key == "another My Inference Key" - assert self.client.inference_key == "My Inference Key" + copied = self.client.copy(model_access_key="another My Model Access Key") + assert copied.model_access_key == "another My Model Access Key" + assert self.client.model_access_key == "My Model Access Key" - copied = self.client.copy(agent_key="another My Agent Key") - assert copied.agent_key == "another My Agent Key" - assert self.client.agent_key == "My Agent Key" + copied = self.client.copy(agent_access_key="another My Agent Access Key") + assert copied.agent_access_key == "another My Agent Access Key" + assert self.client.agent_access_key == "My Agent Access Key" def test_copy_default_options(self) -> None: # options that have a default are overridden correctly @@ -137,9 +137,9 @@ def test_copy_default_options(self) -> None: def test_copy_default_headers(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -176,9 +176,9 @@ def test_copy_default_headers(self) -> None: def test_copy_default_query(self) -> None: client = Gradient( base_url=base_url, - 
api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_query={"foo": "bar"}, ) @@ -316,9 +316,9 @@ def test_request_timeout(self) -> None: def test_client_timeout_option(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, timeout=httpx.Timeout(0), ) @@ -332,9 +332,9 @@ def test_http_client_timeout_option(self) -> None: with httpx.Client(timeout=None) as http_client: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -349,9 +349,9 @@ def test_http_client_timeout_option(self) -> None: with httpx.Client() as http_client: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -366,9 +366,9 @@ def test_http_client_timeout_option(self) -> None: with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -384,9 +384,9 @@ async def test_invalid_http_client(self) -> None: async with httpx.AsyncClient() as http_client: Gradient( base_url=base_url, - api_key=api_key, - 
inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=cast(Any, http_client), ) @@ -394,9 +394,9 @@ async def test_invalid_http_client(self) -> None: def test_default_headers_option(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -406,9 +406,9 @@ def test_default_headers_option(self) -> None: client2 = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", @@ -422,26 +422,26 @@ def test_default_headers_option(self) -> None: def test_validate_headers(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) - assert request.headers.get("Authorization") == f"Bearer {api_key}" + assert request.headers.get("Authorization") == f"Bearer {access_token}" with update_env(**{"DIGITALOCEAN_ACCESS_TOKEN": Omit()}): client2 = Gradient( base_url=base_url, - api_key=None, - inference_key=inference_key, - agent_key=agent_key, + access_token=None, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) with pytest.raises( TypeError, - match="Could not resolve authentication method. 
Expected api_key, agent_key, or inference_key to be set. Or for the `Authorization` headers to be explicitly omitted", + match="Could not resolve authentication method. Expected access_token, agent_access_key, or model_access_key to be set. Or for the `Authorization` headers to be explicitly omitted", ): client2._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -455,9 +455,9 @@ def test_validate_headers(self) -> None: def test_default_query_option(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_query={"query_param": "bar"}, ) @@ -671,9 +671,9 @@ class Model(BaseModel): def test_base_url_setter(self) -> None: client = Gradient( base_url="https://example.com/from_init", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) assert client.base_url == "https://example.com/from_init/" @@ -685,9 +685,9 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"): client = Gradient( - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) assert client.base_url == "http://localhost:5000/from/env/" @@ -697,16 +697,16 @@ def test_base_url_env(self) -> None: [ Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), Gradient( 
base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -728,16 +728,16 @@ def test_base_url_trailing_slash(self, client: Gradient) -> None: [ Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -759,16 +759,16 @@ def test_base_url_no_trailing_slash(self, client: Gradient) -> None: [ Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), Gradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.Client(), ), @@ -788,9 +788,9 @@ def test_absolute_request_url(self, client: Gradient) -> None: def test_copied_client_does_not_close_http(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) 
assert not client.is_closed() @@ -805,9 +805,9 @@ def test_copied_client_does_not_close_http(self) -> None: def test_client_context_manager(self) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) with client as c2: @@ -834,9 +834,9 @@ def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, max_retries=cast(Any, None), ) @@ -867,9 +867,9 @@ class Model(BaseModel): strict_client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) @@ -878,9 +878,9 @@ class Model(BaseModel): client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=False, ) @@ -914,9 +914,9 @@ def test_parse_retry_after_header( ) -> None: client = Gradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) @@ -1150,9 +1150,9 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: class TestAsyncGradient: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + 
access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) @@ -1188,17 +1188,17 @@ def test_copy(self) -> None: copied = self.client.copy() assert id(copied) != id(self.client) - copied = self.client.copy(api_key="another My API Key") - assert copied.api_key == "another My API Key" - assert self.client.api_key == "My API Key" + copied = self.client.copy(access_token="another My Access Token") + assert copied.access_token == "another My Access Token" + assert self.client.access_token == "My Access Token" - copied = self.client.copy(inference_key="another My Inference Key") - assert copied.inference_key == "another My Inference Key" - assert self.client.inference_key == "My Inference Key" + copied = self.client.copy(model_access_key="another My Model Access Key") + assert copied.model_access_key == "another My Model Access Key" + assert self.client.model_access_key == "My Model Access Key" - copied = self.client.copy(agent_key="another My Agent Key") - assert copied.agent_key == "another My Agent Key" - assert self.client.agent_key == "My Agent Key" + copied = self.client.copy(agent_access_key="another My Agent Access Key") + assert copied.agent_access_key == "another My Agent Access Key" + assert self.client.agent_access_key == "My Agent Access Key" def test_copy_default_options(self) -> None: # options that have a default are overridden correctly @@ -1219,9 +1219,9 @@ def test_copy_default_options(self) -> None: def test_copy_default_headers(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -1258,9 +1258,9 @@ def test_copy_default_headers(self) -> None: def test_copy_default_query(self) -> None: client = AsyncGradient( 
base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_query={"foo": "bar"}, ) @@ -1398,9 +1398,9 @@ async def test_request_timeout(self) -> None: async def test_client_timeout_option(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, timeout=httpx.Timeout(0), ) @@ -1414,9 +1414,9 @@ async def test_http_client_timeout_option(self) -> None: async with httpx.AsyncClient(timeout=None) as http_client: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -1431,9 +1431,9 @@ async def test_http_client_timeout_option(self) -> None: async with httpx.AsyncClient() as http_client: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -1448,9 +1448,9 @@ async def test_http_client_timeout_option(self) -> None: async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=http_client, ) @@ -1466,9 +1466,9 @@ def test_invalid_http_client(self) -> None: with 
httpx.Client() as http_client: AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=cast(Any, http_client), ) @@ -1476,9 +1476,9 @@ def test_invalid_http_client(self) -> None: def test_default_headers_option(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}, ) @@ -1488,9 +1488,9 @@ def test_default_headers_option(self) -> None: client2 = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", @@ -1504,26 +1504,26 @@ def test_default_headers_option(self) -> None: def test_validate_headers(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) - assert request.headers.get("Authorization") == f"Bearer {api_key}" + assert request.headers.get("Authorization") == f"Bearer {access_token}" with update_env(**{"DIGITALOCEAN_ACCESS_TOKEN": Omit()}): client2 = AsyncGradient( base_url=base_url, - api_key=None, - inference_key=inference_key, - agent_key=agent_key, + access_token=None, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) with pytest.raises( 
TypeError, - match="Could not resolve authentication method. Expected api_key, agent_key, or inference_key to be set. Or for the `Authorization` headers to be explicitly omitted", + match="Could not resolve authentication method. Expected access_token, agent_access_key, or model_access_key to be set. Or for the `Authorization` headers to be explicitly omitted", ): client2._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1537,9 +1537,9 @@ def test_validate_headers(self) -> None: def test_default_query_option(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, default_query={"query_param": "bar"}, ) @@ -1759,9 +1759,9 @@ class Model(BaseModel): def test_base_url_setter(self) -> None: client = AsyncGradient( base_url="https://example.com/from_init", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) assert client.base_url == "https://example.com/from_init/" @@ -1773,9 +1773,9 @@ def test_base_url_setter(self) -> None: def test_base_url_env(self) -> None: with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"): client = AsyncGradient( - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) assert client.base_url == "http://localhost:5000/from/env/" @@ -1785,16 +1785,16 @@ def test_base_url_env(self) -> None: [ AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + 
agent_access_key=agent_access_key, _strict_response_validation=True, ), AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1816,16 +1816,16 @@ def test_base_url_trailing_slash(self, client: AsyncGradient) -> None: [ AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1847,16 +1847,16 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None: [ AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ), AsyncGradient( base_url="http://localhost:5000/custom/path/", - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), @@ -1876,9 +1876,9 @@ def test_absolute_request_url(self, client: AsyncGradient) -> None: async def test_copied_client_does_not_close_http(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - 
agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) assert not client.is_closed() @@ -1894,9 +1894,9 @@ async def test_copied_client_does_not_close_http(self) -> None: async def test_client_context_manager(self) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) async with client as c2: @@ -1926,9 +1926,9 @@ async def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, max_retries=cast(Any, None), ) @@ -1963,9 +1963,9 @@ class Model(BaseModel): strict_client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) @@ -1974,9 +1974,9 @@ class Model(BaseModel): client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=False, ) @@ -2011,9 +2011,9 @@ async def test_parse_retry_after_header( ) -> None: client = AsyncGradient( base_url=base_url, - api_key=api_key, - inference_key=inference_key, - agent_key=agent_key, + access_token=access_token, + model_access_key=model_access_key, + agent_access_key=agent_access_key, _strict_response_validation=True, ) From 
68daceb4cf89b76fbf04e5111cea7541a989afed Mon Sep 17 00:00:00 2001 From: Ben Batha Date: Fri, 8 Aug 2025 16:22:06 -0400 Subject: [PATCH 5/7] fix: actually read env vars --- src/gradient/_client.py | 51 +++++----- .../resources/agents/chat/completions.py | 16 +-- src/gradient/resources/chat/completions.py | 99 +++++++++++++------ 3 files changed, 99 insertions(+), 67 deletions(-) diff --git a/src/gradient/_client.py b/src/gradient/_client.py index bb89769a..beebb320 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -80,9 +80,9 @@ class Gradient(SyncAPIClient): def __init__( self, *, - api_key: str | None = None, # deprecated, use `access_token` instead - inference_key: str | None = None, # deprecated, use `model_access_key` instead - agent_key: str | None = None, # deprecated, use `agent_access_key` instead + api_key: str | None = None, # deprecated, use `access_token` instead + inference_key: str | None = None, # deprecated, use `model_access_key` instead + agent_key: str | None = None, # deprecated, use `agent_access_key` instead access_token: str | None = None, model_access_key: str | None = None, agent_access_key: str | None = None, @@ -124,7 +124,6 @@ def __init__( access_token = os.environ.get("GRADIENT_API_KEY") self.access_token = access_token - if model_access_key is None: if inference_key is not None: model_access_key = inference_key @@ -145,8 +144,15 @@ def __init__( agent_access_key = os.environ.get("GRADIENT_AGENT_KEY") self.agent_access_key = agent_access_key + if agent_endpoint is None: + agent_endpoint = os.environ.get("GRADIENT_AGENT_ENDPOINT") self._agent_endpoint = agent_endpoint + if inference_endpoint is None: + inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") + if inference_endpoint is None: + inference_endpoint = "https://inference.do-ai.run" + self.inference_endpoint = inference_endpoint if base_url is None: @@ -275,9 +281,9 @@ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: 
def copy( self, *, - api_key: str | None = None, # deprecated, use `access_token` instead - inference_key: str | None = None, # deprecated, use `model_access_key` instead - agent_key: str | None = None, # deprecated, use `agent_access_key` instead + api_key: str | None = None, # deprecated, use `access_token` instead + inference_key: str | None = None, # deprecated, use `model_access_key` instead + agent_key: str | None = None, # deprecated, use `agent_access_key` instead access_token: str | None = None, model_access_key: str | None = None, agent_access_key: str | None = None, @@ -393,9 +399,9 @@ class AsyncGradient(AsyncAPIClient): def __init__( self, *, - api_key: str | None = None, # deprecated, use `access_token` instead - inference_key: str | None = None, # deprecated, use `model_access_key` instead - agent_key: str | None = None, # deprecated, use `agent_access_key` instead + api_key: str | None = None, # deprecated, use `access_token` instead + inference_key: str | None = None, # deprecated, use `model_access_key` instead + agent_key: str | None = None, # deprecated, use `agent_access_key` instead access_token: str | None = None, model_access_key: str | None = None, agent_access_key: str | None = None, @@ -437,7 +443,6 @@ def __init__( access_token = os.environ.get("GRADIENT_API_KEY") self.access_token = access_token - if model_access_key is None: if inference_key is not None: model_access_key = inference_key @@ -588,9 +593,9 @@ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: def copy( self, *, - api_key: str | None = None, # deprecated, use `access_token` instead - inference_key: str | None = None, # deprecated, use `model_access_key` instead - agent_key: str | None = None, # deprecated, use `agent_access_key` instead + api_key: str | None = None, # deprecated, use `access_token` instead + inference_key: str | None = None, # deprecated, use `model_access_key` instead + agent_key: str | None = None, # deprecated, use 
`agent_access_key` instead agent_endpoint: str | None = None, access_token: str | None = None, model_access_key: str | None = None, @@ -633,23 +638,11 @@ def copy( http_client = http_client or self._client client = self.__class__( -<<<<<<< HEAD - api_key=api_key or self.api_key, - inference_key=inference_key or self.inference_key, - agent_key=agent_key or self.agent_key, - agent_endpoint=agent_endpoint or self._agent_endpoint, -||||||| eb1dcf7 - api_key=api_key or self.api_key, - inference_key=inference_key or self.inference_key, - agent_key=agent_key or self.agent_key, - agent_domain=agent_domain or self.agent_domain, -======= - access_token=access_token or self.access_token, - model_access_key=model_access_key or self.model_access_key, - agent_access_key=agent_access_key or self.agent_access_key, + access_token=access_token or api_key or self.access_token, + model_access_key=model_access_key or inference_key or self.model_access_key, + agent_access_key=agent_access_key or agent_key or self.agent_access_key, agent_endpoint=agent_endpoint or self.agent_endpoint, inference_endpoint=inference_endpoint or self.inference_endpoint, ->>>>>>> origin/generated--merge-conflict base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, diff --git a/src/gradient/resources/agents/chat/completions.py b/src/gradient/resources/agents/chat/completions.py index 67f5fc47..540a7890 100644 --- a/src/gradient/resources/agents/chat/completions.py +++ b/src/gradient/resources/agents/chat/completions.py @@ -463,13 +463,13 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: - # This method requires an agent_key to be set via client argument or environment variable - if not self._client.agent_key: + # This method requires an agent_access_key to be set via client argument or environment variable + if not 
self._client.agent_access_key: raise TypeError( - "Could not resolve authentication method. Expected agent_key to be set for chat completions." + "Could not resolve authentication method. Expected agent_access_key to be set for chat completions." ) headers = extra_headers or {} - headers = {"Authorization": f"Bearer {self._client.agent_key}", **headers} + headers = {"Authorization": f"Bearer {self._client.agent_access_key}", **headers} return self._post( ( @@ -951,13 +951,13 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: - # This method requires an agent_key to be set via client argument or environment variable - if not self._client.agent_key: + # This method requires an agent_access_key to be set via client argument or environment variable + if not self._client.agent_access_key: raise TypeError( - "Could not resolve authentication method. Expected agent_key to be set for chat completions." + "Could not resolve authentication method. Expected agent_access_key to be set for chat completions." 
) headers = extra_headers or {} - headers = {"Authorization": f"Bearer {self._client.agent_key}", **headers} + headers = {"Authorization": f"Bearer {self._client.agent_access_key}", **headers} return await self._post( ( diff --git a/src/gradient/resources/chat/completions.py b/src/gradient/resources/chat/completions.py index b1147020..18b2a17a 100644 --- a/src/gradient/resources/chat/completions.py +++ b/src/gradient/resources/chat/completions.py @@ -62,7 +62,9 @@ def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + stream_options: ( + Optional[completion_create_params.StreamOptions] | NotGiven + ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -191,7 +193,9 @@ def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + stream_options: ( + Optional[completion_create_params.StreamOptions] | NotGiven + ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -319,7 +323,9 @@ def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + stream_options: ( + Optional[completion_create_params.StreamOptions] | 
NotGiven + ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -447,7 +453,9 @@ def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + stream_options: ( + Optional[completion_create_params.StreamOptions] | NotGiven + ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -461,18 +469,23 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CompletionCreateResponse | Stream[ChatCompletionChunk]: - # This method requires an inference_key to be set via client argument or environment variable - if not self._client.inference_key: + # This method requires an model_access_key to be set via client argument or environment variable + if not self._client.model_access_key: raise TypeError( - "Could not resolve authentication method. Expected inference_key to be set for chat completions." + "Could not resolve authentication method. Expected model_access_key to be set for chat completions." 
) headers = extra_headers or {} - headers = {"Authorization": f"Bearer {self._client.inference_key}", **headers} + headers = { + "Authorization": f"Bearer {self._client.model_access_key}", + **headers, + } return self._post( - "/chat/completions" - if self._client._base_url_overridden - else "https://inference.do-ai.run/v1/chat/completions", + ( + "/chat/completions" + if self._client._base_url_overridden + else f"{self._client.inference_endpoint}/v1/chat/completions" + ), body=maybe_transform( { "messages": messages, @@ -495,12 +508,17 @@ def create( "top_p": top_p, "user": user, }, - completion_create_params.CompletionCreateParamsStreaming - if stream - else completion_create_params.CompletionCreateParamsNonStreaming, + ( + completion_create_params.CompletionCreateParamsStreaming + if stream + else completion_create_params.CompletionCreateParamsNonStreaming + ), ), options=make_request_options( - extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, ), cast_to=CompletionCreateResponse, stream=stream or False, @@ -544,7 +562,9 @@ async def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + stream_options: ( + Optional[completion_create_params.StreamOptions] | NotGiven + ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -673,7 +693,9 @@ async def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream_options: 
Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + stream_options: ( + Optional[completion_create_params.StreamOptions] | NotGiven + ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -801,7 +823,9 @@ async def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + stream_options: ( + Optional[completion_create_params.StreamOptions] | NotGiven + ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -929,7 +953,9 @@ async def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + stream_options: ( + Optional[completion_create_params.StreamOptions] | NotGiven + ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -943,18 +969,26 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CompletionCreateResponse | AsyncStream[ChatCompletionChunk]: - # This method requires an inference_key to be set via client argument or environment variable - if not hasattr(self._client, "inference_key") or not self._client.inference_key: + # This method requires an 
model_access_key to be set via client argument or environment variable + if ( + not hasattr(self._client, "model_access_key") + or not self._client.model_access_key + ): raise TypeError( - "Could not resolve authentication method. Expected inference_key to be set for chat completions." + "Could not resolve authentication method. Expected model_access_key to be set for chat completions." ) headers = extra_headers or {} - headers = {"Authorization": f"Bearer {self._client.inference_key}", **headers} + headers = { + "Authorization": f"Bearer {self._client.model_access_key}", + **headers, + } return await self._post( - "/chat/completions" - if self._client._base_url_overridden - else "https://inference.do-ai.run/v1/chat/completions", + ( + "/chat/completions" + if self._client._base_url_overridden + else f"{self._client.inference_endpoint}/v1/chat/completions" + ), body=await async_maybe_transform( { "messages": messages, @@ -977,12 +1011,17 @@ async def create( "top_p": top_p, "user": user, }, - completion_create_params.CompletionCreateParamsStreaming - if stream - else completion_create_params.CompletionCreateParamsNonStreaming, + ( + completion_create_params.CompletionCreateParamsStreaming + if stream + else completion_create_params.CompletionCreateParamsNonStreaming + ), ), options=make_request_options( - extra_headers=headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, ), cast_to=CompletionCreateResponse, stream=stream or False, From 4bd6ace92d2dbfe1364c5f5aa8e0bf5899e8fc16 Mon Sep 17 00:00:00 2001 From: Ben Batha Date: Fri, 8 Aug 2025 16:24:39 -0400 Subject: [PATCH 6/7] fix: use of cached variable in internals --- src/gradient/_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/gradient/_client.py b/src/gradient/_client.py index beebb320..f5866900 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ 
-329,7 +329,7 @@ def copy( access_token=access_token or api_key or self.access_token, model_access_key=model_access_key or inference_key or self.model_access_key, agent_access_key=agent_access_key or agent_key or self.agent_access_key, - agent_endpoint=agent_endpoint or self.agent_endpoint, + agent_endpoint=agent_endpoint or self._agent_endpoint, inference_endpoint=inference_endpoint or self.inference_endpoint, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, @@ -641,7 +641,7 @@ def copy( access_token=access_token or api_key or self.access_token, model_access_key=model_access_key or inference_key or self.model_access_key, agent_access_key=agent_access_key or agent_key or self.agent_access_key, - agent_endpoint=agent_endpoint or self.agent_endpoint, + agent_endpoint=agent_endpoint or self._agent_endpoint, inference_endpoint=inference_endpoint or self.inference_endpoint, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, From 377fb5317d519a7e863720020bca3f8a2ab15b3a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 20:25:02 +0000 Subject: [PATCH 7/7] release: 3.0.0-beta.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 21 +++++++++++++++++++++ pyproject.toml | 2 +- src/gradient/_version.py | 2 +- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d25059a8..5e212f31 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.0-beta.2" + ".": "3.0.0-beta.3" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 594ea379..d41133fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## 3.0.0-beta.3 (2025-08-08) + +Full Changelog: 
[v3.0.0-beta.2...v3.0.0-beta.3](https://github.com/digitalocean/gradient-python/compare/v3.0.0-beta.2...v3.0.0-beta.3) + +### Features + +* **api:** make kwargs match the env vars ([b74952e](https://github.com/digitalocean/gradient-python/commit/b74952e665a92a50937f475ef68331d85d96e018)) +* **api:** rename environment variables ([ed70ab7](https://github.com/digitalocean/gradient-python/commit/ed70ab72ce3faecd7fb5070f429275518b7aa6f2)) + + +### Bug Fixes + +* actually read env vars ([68daceb](https://github.com/digitalocean/gradient-python/commit/68daceb4cf89b76fbf04e5111cea7541a989afed)) +* **config:** align environment variables with other DO tools and console ([#40](https://github.com/digitalocean/gradient-python/issues/40)) ([#41](https://github.com/digitalocean/gradient-python/issues/41)) ([6853d05](https://github.com/digitalocean/gradient-python/commit/6853d0542055a29a70685cab67414e5612890c7d)) +* use of cached variable in internals ([4bd6ace](https://github.com/digitalocean/gradient-python/commit/4bd6ace92d2dbfe1364c5f5aa8e0bf5899e8fc16)) + + +### Chores + +* **internal:** fix ruff target version ([b370349](https://github.com/digitalocean/gradient-python/commit/b370349a68d24b00854e3f54df50c86f2c29651b)) + ## 3.0.0-beta.2 (2025-08-04) Full Changelog: [v3.0.0-beta.1...v3.0.0-beta.2](https://github.com/digitalocean/gradient-python/compare/v3.0.0-beta.1...v3.0.0-beta.2) diff --git a/pyproject.toml b/pyproject.toml index 348648c5..813edb3e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.0-beta.2" +version = "3.0.0-beta.3" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_version.py b/src/gradient/_version.py index 5bf5b229..483c7ac9 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "gradient" -__version__ = "3.0.0-beta.2" # x-release-please-version +__version__ = "3.0.0-beta.3" # x-release-please-version