From b900d769ba4a290523f17d2d69de850366c961b6 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 30 Sep 2025 15:56:18 +0000
Subject: [PATCH 01/11] feat(api): update via SDK Studio
---
.stats.yml | 2 +-
src/gradient/types/images/__init__.py | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
create mode 100644 src/gradient/types/images/__init__.py
diff --git a/.stats.yml b/.stats.yml
index e30c19b7..c088653e 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 173
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-621c3ebf5011c5ca508f78fccbea17de4ca6b35bfe99578c1ae2265021578d6f.yml
openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1
-config_hash: 3d425c415b7f7ab581418b43eb521cb3
+config_hash: a8e1b6dcbc8b1126938d320837c5926d
diff --git a/src/gradient/types/images/__init__.py b/src/gradient/types/images/__init__.py
new file mode 100644
index 00000000..f8ee8b14
--- /dev/null
+++ b/src/gradient/types/images/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
From a5f6aa656021a9aaa6a2e82dfa251f87f0096de0 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 30 Sep 2025 16:00:17 +0000
Subject: [PATCH 02/11] feat(api): update via SDK Studio
---
.stats.yml | 6 +-
api.md | 14 +
src/gradient/_client.py | 39 ++-
src/gradient/resources/__init__.py | 14 +
src/gradient/resources/images/__init__.py | 33 ++
src/gradient/resources/images/generations.py | 307 ++++++++++++++++++
src/gradient/resources/images/images.py | 102 ++++++
.../agents/chat/completion_create_params.py | 68 +++-
.../agents/chat/completion_create_response.py | 3 +
.../types/chat/completion_create_params.py | 68 +++-
.../types/chat/completion_create_response.py | 3 +
src/gradient/types/images/__init__.py | 3 +
.../types/images/generation_create_params.py | 85 +++++
.../images/generation_create_response.py | 63 ++++
.../types/shared/chat_completion_chunk.py | 3 +
src/gradient/types/shared/size.py | 2 +-
tests/api_resources/images/__init__.py | 1 +
.../api_resources/images/test_generations.py | 130 ++++++++
18 files changed, 931 insertions(+), 13 deletions(-)
create mode 100644 src/gradient/resources/images/__init__.py
create mode 100644 src/gradient/resources/images/generations.py
create mode 100644 src/gradient/resources/images/images.py
create mode 100644 src/gradient/types/images/generation_create_params.py
create mode 100644 src/gradient/types/images/generation_create_response.py
create mode 100644 tests/api_resources/images/__init__.py
create mode 100644 tests/api_resources/images/test_generations.py
diff --git a/.stats.yml b/.stats.yml
index c088653e..ac604dc7 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 173
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-621c3ebf5011c5ca508f78fccbea17de4ca6b35bfe99578c1ae2265021578d6f.yml
-openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1
+configured_endpoints: 174
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-3364ccdab713948eb06e9f77fb3b2046d0627ac2c18d389ef60d91bdfbeb2dd7.yml
+openapi_spec_hash: 83a3d092965fde776b29b61f785459f9
config_hash: a8e1b6dcbc8b1126938d320837c5926d
diff --git a/api.md b/api.md
index 7299b3c6..f416429a 100644
--- a/api.md
+++ b/api.md
@@ -383,6 +383,20 @@ Methods:
- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
+# Images
+
+## Generations
+
+Types:
+
+```python
+from gradient.types.images import GenerationCreateResponse
+```
+
+Methods:
+
+- client.images.generations.create(\*\*params) -> GenerationCreateResponse
+
# GPUDroplets
Types:
diff --git a/src/gradient/_client.py b/src/gradient/_client.py
index 338d343e..0a33fed9 100644
--- a/src/gradient/_client.py
+++ b/src/gradient/_client.py
@@ -34,7 +34,7 @@
if TYPE_CHECKING:
from .resources import (
chat,
- agents,
+ agents,
+ images,
models,
regions,
databases,
@@ -49,6 +49,7 @@
AsyncGPUDropletsResource,
)
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
+ from .resources.images.images import ImagesResource, AsyncImagesResource
from .resources.models.models import ModelsResource, AsyncModelsResource
from .resources.databases.databases import DatabasesResource, AsyncDatabasesResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
@@ -182,6 +183,12 @@ def chat(self) -> ChatResource:
return ChatResource(self)
+ @cached_property
+ def images(self) -> ImagesResource:
+ from .resources.images import ImagesResource
+
+ return ImagesResource(self)
+
@cached_property
def gpu_droplets(self) -> GPUDropletsResource:
from .resources.gpu_droplets import GPUDropletsResource
@@ -471,6 +478,12 @@ def chat(self) -> AsyncChatResource:
return AsyncChatResource(self)
+ @cached_property
+ def images(self) -> AsyncImagesResource:
+ from .resources.images import AsyncImagesResource
+
+ return AsyncImagesResource(self)
+
@cached_property
def gpu_droplets(self) -> AsyncGPUDropletsResource:
from .resources.gpu_droplets import AsyncGPUDropletsResource
@@ -665,6 +678,12 @@ def chat(self) -> chat.ChatResourceWithRawResponse:
return ChatResourceWithRawResponse(self._client.chat)
+ @cached_property
+ def images(self) -> images.ImagesResourceWithRawResponse:
+ from .resources.images import ImagesResourceWithRawResponse
+
+ return ImagesResourceWithRawResponse(self._client.images)
+
@cached_property
def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithRawResponse:
from .resources.gpu_droplets import GPUDropletsResourceWithRawResponse
@@ -720,6 +739,12 @@ def chat(self) -> chat.AsyncChatResourceWithRawResponse:
return AsyncChatResourceWithRawResponse(self._client.chat)
+ @cached_property
+ def images(self) -> images.AsyncImagesResourceWithRawResponse:
+ from .resources.images import AsyncImagesResourceWithRawResponse
+
+ return AsyncImagesResourceWithRawResponse(self._client.images)
+
@cached_property
def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithRawResponse:
from .resources.gpu_droplets import AsyncGPUDropletsResourceWithRawResponse
@@ -779,6 +804,12 @@ def chat(self) -> chat.ChatResourceWithStreamingResponse:
return ChatResourceWithStreamingResponse(self._client.chat)
+ @cached_property
+ def images(self) -> images.ImagesResourceWithStreamingResponse:
+ from .resources.images import ImagesResourceWithStreamingResponse
+
+ return ImagesResourceWithStreamingResponse(self._client.images)
+
@cached_property
def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithStreamingResponse:
from .resources.gpu_droplets import GPUDropletsResourceWithStreamingResponse
@@ -838,6 +869,12 @@ def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
return AsyncChatResourceWithStreamingResponse(self._client.chat)
+ @cached_property
+ def images(self) -> images.AsyncImagesResourceWithStreamingResponse:
+ from .resources.images import AsyncImagesResourceWithStreamingResponse
+
+ return AsyncImagesResourceWithStreamingResponse(self._client.images)
+
@cached_property
def gpu_droplets(
self,
diff --git a/src/gradient/resources/__init__.py b/src/gradient/resources/__init__.py
index d5198560..fdc7d346 100644
--- a/src/gradient/resources/__init__.py
+++ b/src/gradient/resources/__init__.py
@@ -16,6 +16,14 @@
AgentsResourceWithStreamingResponse,
AsyncAgentsResourceWithStreamingResponse,
)
+from .images import (
+ ImagesResource,
+ AsyncImagesResource,
+ ImagesResourceWithRawResponse,
+ AsyncImagesResourceWithRawResponse,
+ ImagesResourceWithStreamingResponse,
+ AsyncImagesResourceWithStreamingResponse,
+)
from .models import (
ModelsResource,
AsyncModelsResource,
@@ -78,6 +86,12 @@
"AsyncChatResourceWithRawResponse",
"ChatResourceWithStreamingResponse",
"AsyncChatResourceWithStreamingResponse",
+ "ImagesResource",
+ "AsyncImagesResource",
+ "ImagesResourceWithRawResponse",
+ "AsyncImagesResourceWithRawResponse",
+ "ImagesResourceWithStreamingResponse",
+ "AsyncImagesResourceWithStreamingResponse",
"GPUDropletsResource",
"AsyncGPUDropletsResource",
"GPUDropletsResourceWithRawResponse",
diff --git a/src/gradient/resources/images/__init__.py b/src/gradient/resources/images/__init__.py
new file mode 100644
index 00000000..cf187f1d
--- /dev/null
+++ b/src/gradient/resources/images/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .images import (
+ ImagesResource,
+ AsyncImagesResource,
+ ImagesResourceWithRawResponse,
+ AsyncImagesResourceWithRawResponse,
+ ImagesResourceWithStreamingResponse,
+ AsyncImagesResourceWithStreamingResponse,
+)
+from .generations import (
+ GenerationsResource,
+ AsyncGenerationsResource,
+ GenerationsResourceWithRawResponse,
+ AsyncGenerationsResourceWithRawResponse,
+ GenerationsResourceWithStreamingResponse,
+ AsyncGenerationsResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "GenerationsResource",
+ "AsyncGenerationsResource",
+ "GenerationsResourceWithRawResponse",
+ "AsyncGenerationsResourceWithRawResponse",
+ "GenerationsResourceWithStreamingResponse",
+ "AsyncGenerationsResourceWithStreamingResponse",
+ "ImagesResource",
+ "AsyncImagesResource",
+ "ImagesResourceWithRawResponse",
+ "AsyncImagesResourceWithRawResponse",
+ "ImagesResourceWithStreamingResponse",
+ "AsyncImagesResourceWithStreamingResponse",
+]
diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images/generations.py
new file mode 100644
index 00000000..32701d46
--- /dev/null
+++ b/src/gradient/resources/images/generations.py
@@ -0,0 +1,307 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.images import generation_create_params
+from ...types.images.generation_create_response import GenerationCreateResponse
+
+__all__ = ["GenerationsResource", "AsyncGenerationsResource"]
+
+
+class GenerationsResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> GenerationsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return GenerationsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> GenerationsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return GenerationsResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ prompt: str,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ stream: Optional[bool] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GenerationCreateResponse:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/images/generations"
+ if self._client._base_url_overridden
+ else "https://inference.do-ai.run/v1/images/generations",
+ body=maybe_transform(
+ {
+ "prompt": prompt,
+ "background": background,
+ "model": model,
+ "moderation": moderation,
+ "n": n,
+ "output_compression": output_compression,
+ "output_format": output_format,
+ "partial_images": partial_images,
+ "quality": quality,
+ "size": size,
+ "stream": stream,
+ "user": user,
+ },
+ generation_create_params.GenerationCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=GenerationCreateResponse,
+ )
+
+
+class AsyncGenerationsResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncGenerationsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncGenerationsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncGenerationsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncGenerationsResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ prompt: str,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ stream: Optional[bool] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GenerationCreateResponse:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/images/generations"
+ if self._client._base_url_overridden
+ else "https://inference.do-ai.run/v1/images/generations",
+ body=await async_maybe_transform(
+ {
+ "prompt": prompt,
+ "background": background,
+ "model": model,
+ "moderation": moderation,
+ "n": n,
+ "output_compression": output_compression,
+ "output_format": output_format,
+ "partial_images": partial_images,
+ "quality": quality,
+ "size": size,
+ "stream": stream,
+ "user": user,
+ },
+ generation_create_params.GenerationCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=GenerationCreateResponse,
+ )
+
+
+class GenerationsResourceWithRawResponse:
+ def __init__(self, generations: GenerationsResource) -> None:
+ self._generations = generations
+
+ self.create = to_raw_response_wrapper(
+ generations.create,
+ )
+
+
+class AsyncGenerationsResourceWithRawResponse:
+ def __init__(self, generations: AsyncGenerationsResource) -> None:
+ self._generations = generations
+
+ self.create = async_to_raw_response_wrapper(
+ generations.create,
+ )
+
+
+class GenerationsResourceWithStreamingResponse:
+ def __init__(self, generations: GenerationsResource) -> None:
+ self._generations = generations
+
+ self.create = to_streamed_response_wrapper(
+ generations.create,
+ )
+
+
+class AsyncGenerationsResourceWithStreamingResponse:
+ def __init__(self, generations: AsyncGenerationsResource) -> None:
+ self._generations = generations
+
+ self.create = async_to_streamed_response_wrapper(
+ generations.create,
+ )
diff --git a/src/gradient/resources/images/images.py b/src/gradient/resources/images/images.py
new file mode 100644
index 00000000..37e7290f
--- /dev/null
+++ b/src/gradient/resources/images/images.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from .generations import (
+ GenerationsResource,
+ AsyncGenerationsResource,
+ GenerationsResourceWithRawResponse,
+ AsyncGenerationsResourceWithRawResponse,
+ GenerationsResourceWithStreamingResponse,
+ AsyncGenerationsResourceWithStreamingResponse,
+)
+
+__all__ = ["ImagesResource", "AsyncImagesResource"]
+
+
+class ImagesResource(SyncAPIResource):
+ @cached_property
+ def generations(self) -> GenerationsResource:
+ return GenerationsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ImagesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ImagesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ImagesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ImagesResourceWithStreamingResponse(self)
+
+
+class AsyncImagesResource(AsyncAPIResource):
+ @cached_property
+ def generations(self) -> AsyncGenerationsResource:
+ return AsyncGenerationsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncImagesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncImagesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncImagesResourceWithStreamingResponse(self)
+
+
+class ImagesResourceWithRawResponse:
+ def __init__(self, images: ImagesResource) -> None:
+ self._images = images
+
+ @cached_property
+ def generations(self) -> GenerationsResourceWithRawResponse:
+ return GenerationsResourceWithRawResponse(self._images.generations)
+
+
+class AsyncImagesResourceWithRawResponse:
+ def __init__(self, images: AsyncImagesResource) -> None:
+ self._images = images
+
+ @cached_property
+ def generations(self) -> AsyncGenerationsResourceWithRawResponse:
+ return AsyncGenerationsResourceWithRawResponse(self._images.generations)
+
+
+class ImagesResourceWithStreamingResponse:
+ def __init__(self, images: ImagesResource) -> None:
+ self._images = images
+
+ @cached_property
+ def generations(self) -> GenerationsResourceWithStreamingResponse:
+ return GenerationsResourceWithStreamingResponse(self._images.generations)
+
+
+class AsyncImagesResourceWithStreamingResponse:
+ def __init__(self, images: AsyncImagesResource) -> None:
+ self._images = images
+
+ @cached_property
+ def generations(self) -> AsyncGenerationsResourceWithStreamingResponse:
+ return AsyncGenerationsResourceWithStreamingResponse(self._images.generations)
diff --git a/src/gradient/types/agents/chat/completion_create_params.py b/src/gradient/types/agents/chat/completion_create_params.py
index d8cf7bc1..0980132e 100644
--- a/src/gradient/types/agents/chat/completion_create_params.py
+++ b/src/gradient/types/agents/chat/completion_create_params.py
@@ -11,9 +11,17 @@
"CompletionCreateParamsBase",
"Message",
"MessageChatCompletionRequestSystemMessage",
+ "MessageChatCompletionRequestSystemMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1",
"MessageChatCompletionRequestDeveloperMessage",
+ "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1",
"MessageChatCompletionRequestUserMessage",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1",
"MessageChatCompletionRequestAssistantMessage",
+ "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1",
"MessageChatCompletionRequestAssistantMessageToolCall",
"MessageChatCompletionRequestAssistantMessageToolCallFunction",
"MessageChatCompletionRequestToolMessage",
@@ -157,30 +165,82 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""
+class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+
+MessageChatCompletionRequestSystemMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1
+]
+
+
class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
- content: Required[Union[str, SequenceNotStr[str]]]
+ content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestSystemMessageContentArrayOfContentPart]]]
"""The contents of the system message."""
role: Required[Literal["system"]]
"""The role of the messages author, in this case `system`."""
+class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+
+MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1
+]
+
+
class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
- content: Required[Union[str, SequenceNotStr[str]]]
+ content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart]]]
"""The contents of the developer message."""
role: Required[Literal["developer"]]
"""The role of the messages author, in this case `developer`."""
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+
+MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1
+]
+
+
class MessageChatCompletionRequestUserMessage(TypedDict, total=False):
- content: Required[Union[str, SequenceNotStr[str]]]
+ content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]]
"""The contents of the user message."""
role: Required[Literal["user"]]
"""The role of the messages author, in this case `user`."""
+class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+
+MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1
+]
+
+
class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False):
arguments: Required[str]
"""
@@ -209,7 +269,7 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False):
role: Required[Literal["assistant"]]
"""The role of the messages author, in this case `assistant`."""
- content: Union[str, SequenceNotStr[str], None]
+ content: Union[str, SequenceNotStr[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None]
"""The contents of the assistant message."""
tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall]
diff --git a/src/gradient/types/agents/chat/completion_create_response.py b/src/gradient/types/agents/chat/completion_create_response.py
index 4c839ded..f3dedb4e 100644
--- a/src/gradient/types/agents/chat/completion_create_response.py
+++ b/src/gradient/types/agents/chat/completion_create_response.py
@@ -53,6 +53,9 @@ class ChoiceMessage(BaseModel):
content: Optional[str] = None
"""The contents of the message."""
+ reasoning_content: Optional[str] = None
+ """The reasoning content generated by the model."""
+
refusal: Optional[str] = None
"""The refusal message generated by the model."""
diff --git a/src/gradient/types/chat/completion_create_params.py b/src/gradient/types/chat/completion_create_params.py
index 17f00242..7874d893 100644
--- a/src/gradient/types/chat/completion_create_params.py
+++ b/src/gradient/types/chat/completion_create_params.py
@@ -11,9 +11,17 @@
"CompletionCreateParamsBase",
"Message",
"MessageChatCompletionRequestSystemMessage",
+ "MessageChatCompletionRequestSystemMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1",
"MessageChatCompletionRequestDeveloperMessage",
+ "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1",
"MessageChatCompletionRequestUserMessage",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1",
"MessageChatCompletionRequestAssistantMessage",
+ "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart",
+ "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1",
"MessageChatCompletionRequestAssistantMessageToolCall",
"MessageChatCompletionRequestAssistantMessageToolCallFunction",
"MessageChatCompletionRequestToolMessage",
@@ -157,30 +165,82 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""
+class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+
+MessageChatCompletionRequestSystemMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1
+]
+
+
class MessageChatCompletionRequestSystemMessage(TypedDict, total=False):
- content: Required[Union[str, SequenceNotStr[str]]]
+ content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestSystemMessageContentArrayOfContentPart]]]
"""The contents of the system message."""
role: Required[Literal["system"]]
"""The role of the messages author, in this case `system`."""
+class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+
+MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1
+]
+
+
class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False):
- content: Required[Union[str, SequenceNotStr[str]]]
+ content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart]]]
"""The contents of the developer message."""
role: Required[Literal["developer"]]
"""The role of the messages author, in this case `developer`."""
+class MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+
+MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1
+]
+
+
class MessageChatCompletionRequestUserMessage(TypedDict, total=False):
- content: Required[Union[str, SequenceNotStr[str]]]
+ content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]]
"""The contents of the user message."""
role: Required[Literal["user"]]
"""The role of the messages author, in this case `user`."""
+class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False):
+ text: Required[str]
+ """The text content"""
+
+ type: Required[Literal["text"]]
+ """The type of content part"""
+
+
+MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[
+ str, MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1
+]
+
+
class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False):
arguments: Required[str]
"""
@@ -209,7 +269,7 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False):
role: Required[Literal["assistant"]]
"""The role of the messages author, in this case `assistant`."""
- content: Union[str, SequenceNotStr[str], None]
+ content: Union[str, SequenceNotStr[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None]
"""The contents of the assistant message."""
tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall]
diff --git a/src/gradient/types/chat/completion_create_response.py b/src/gradient/types/chat/completion_create_response.py
index 73a09cf5..9e157aee 100644
--- a/src/gradient/types/chat/completion_create_response.py
+++ b/src/gradient/types/chat/completion_create_response.py
@@ -53,6 +53,9 @@ class ChoiceMessage(BaseModel):
content: Optional[str] = None
"""The contents of the message."""
+ reasoning_content: Optional[str] = None
+ """The reasoning content generated by the model."""
+
refusal: Optional[str] = None
"""The refusal message generated by the model."""
diff --git a/src/gradient/types/images/__init__.py b/src/gradient/types/images/__init__.py
index f8ee8b14..29634ec1 100644
--- a/src/gradient/types/images/__init__.py
+++ b/src/gradient/types/images/__init__.py
@@ -1,3 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
+
+from .generation_create_params import GenerationCreateParams as GenerationCreateParams
+from .generation_create_response import GenerationCreateResponse as GenerationCreateResponse
diff --git a/src/gradient/types/images/generation_create_params.py b/src/gradient/types/images/generation_create_params.py
new file mode 100644
index 00000000..c4347548
--- /dev/null
+++ b/src/gradient/types/images/generation_create_params.py
@@ -0,0 +1,85 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Required, TypedDict
+
+__all__ = ["GenerationCreateParams"]
+
+
+class GenerationCreateParams(TypedDict, total=False):
+ prompt: Required[str]
+ """A text description of the desired image(s).
+
+ GPT-IMAGE-1 supports up to 32,000 characters and provides automatic prompt
+ optimization for best results.
+ """
+
+ background: Optional[str]
+ """The background setting for the image generation.
+
+ GPT-IMAGE-1 supports: transparent, opaque, auto.
+ """
+
+ model: str
+ """The model to use for image generation.
+
+ GPT-IMAGE-1 is the latest model offering the best quality with automatic
+ optimization and enhanced capabilities.
+ """
+
+ moderation: Optional[str]
+ """The moderation setting for the image generation.
+
+ GPT-IMAGE-1 supports: low, auto.
+ """
+
+ n: Optional[int]
+ """The number of images to generate. GPT-IMAGE-1 only supports n=1."""
+
+ output_compression: Optional[int]
+ """The output compression for the image generation. GPT-IMAGE-1 supports: 0-100."""
+
+ output_format: Optional[str]
+ """The output format for the image generation.
+
+ GPT-IMAGE-1 supports: png, webp, jpeg.
+ """
+
+ partial_images: Optional[int]
+ """The number of partial image chunks to return during streaming generation.
+
+ This parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+ """
+
+ quality: Optional[str]
+ """The quality of the image that will be generated.
+
+ GPT-IMAGE-1 supports: auto (automatically select best quality), high, medium,
+ low.
+ """
+
+ size: Optional[str]
+ """The size of the generated images.
+
+ GPT-IMAGE-1 supports: auto (automatically select best size), 1536x1024
+ (landscape), 1024x1536 (portrait).
+ """
+
+ stream: Optional[bool]
+ """
+ If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+ """
+
+ user: Optional[str]
+ """
+ A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+ """
diff --git a/src/gradient/types/images/generation_create_response.py b/src/gradient/types/images/generation_create_response.py
new file mode 100644
index 00000000..32757c06
--- /dev/null
+++ b/src/gradient/types/images/generation_create_response.py
@@ -0,0 +1,63 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["GenerationCreateResponse", "Data", "Usage", "UsageInputTokensDetails"]
+
+
+class Data(BaseModel):
+ b64_json: str
+ """The base64-encoded JSON of the generated image.
+
+ GPT-IMAGE-1 returns images in b64_json format only.
+ """
+
+ revised_prompt: Optional[str] = None
+ """The optimized prompt that was used to generate the image.
+
+ GPT-IMAGE-1 automatically optimizes prompts for best results.
+ """
+
+
+class UsageInputTokensDetails(BaseModel):
+ text_tokens: Optional[int] = None
+ """Number of text tokens in the input"""
+
+
+class Usage(BaseModel):
+ input_tokens: int
+ """Number of tokens in the input prompt"""
+
+ total_tokens: int
+ """Total number of tokens used (input + output)"""
+
+ input_tokens_details: Optional[UsageInputTokensDetails] = None
+ """Detailed breakdown of input tokens"""
+
+ output_tokens: Optional[int] = None
+ """Number of tokens in the generated output"""
+
+
+class GenerationCreateResponse(BaseModel):
+ created: int
+ """The Unix timestamp (in seconds) of when the images were created"""
+
+ data: List[Data]
+ """The list of generated images"""
+
+ background: Optional[str] = None
+ """The background setting used for the image generation"""
+
+ output_format: Optional[str] = None
+ """The output format of the generated image"""
+
+ quality: Optional[str] = None
+ """The quality setting used for the image generation"""
+
+ size: Optional[str] = None
+ """The size of the generated image"""
+
+ usage: Optional[Usage] = None
+ """Usage statistics for the image generation request"""
diff --git a/src/gradient/types/shared/chat_completion_chunk.py b/src/gradient/types/shared/chat_completion_chunk.py
index 4dd587f9..e30e0604 100644
--- a/src/gradient/types/shared/chat_completion_chunk.py
+++ b/src/gradient/types/shared/chat_completion_chunk.py
@@ -47,6 +47,9 @@ class ChoiceDelta(BaseModel):
content: Optional[str] = None
"""The contents of the chunk message."""
+ reasoning_content: Optional[str] = None
+ """The reasoning content generated by the model."""
+
refusal: Optional[str] = None
"""The refusal message generated by the model."""
diff --git a/src/gradient/types/shared/size.py b/src/gradient/types/shared/size.py
index 42b0b41f..73abb7dd 100644
--- a/src/gradient/types/shared/size.py
+++ b/src/gradient/types/shared/size.py
@@ -50,7 +50,7 @@ class Size(BaseModel):
regions: List[str]
"""
An array containing the region slugs where this size is available for Droplet
- creates. regions:read is required to view.
+ creates.
"""
slug: str
diff --git a/tests/api_resources/images/__init__.py b/tests/api_resources/images/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/images/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/images/test_generations.py b/tests/api_resources/images/test_generations.py
new file mode 100644
index 00000000..2ec20131
--- /dev/null
+++ b/tests/api_resources/images/test_generations.py
@@ -0,0 +1,130 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.images import GenerationCreateResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestGenerations:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ generation = client.images.generations.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ )
+ assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ generation = client.images.generations.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ background="auto",
+ model="openai-gpt-image-1",
+ moderation="auto",
+ n=1,
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="auto",
+ size="auto",
+ stream=False,
+ user="user-1234",
+ )
+ assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.images.generations.with_raw_response.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ generation = response.parse()
+ assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.images.generations.with_streaming_response.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ generation = response.parse()
+ assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncGenerations:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ generation = await async_client.images.generations.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ )
+ assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ generation = await async_client.images.generations.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ background="auto",
+ model="openai-gpt-image-1",
+ moderation="auto",
+ n=1,
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="auto",
+ size="auto",
+ stream=False,
+ user="user-1234",
+ )
+ assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.images.generations.with_raw_response.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ generation = await response.parse()
+ assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.images.generations.with_streaming_response.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ generation = await response.parse()
+ assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
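Patch 02 wires the new `images.generations` resource into the client. The sketch below exercises the endpoint as defined in the diff above; the credential handling is an assumption (auth for this endpoint is tightened later in patch 06), while the method, parameters, and base64-only response shape come straight from the patch.

```python
import base64

from gradient import Gradient

client = Gradient()  # assumed: credentials are picked up from the environment

generation = client.images.generations.create(
    prompt="A cute baby sea otter floating on its back in calm blue water",
    size="1536x1024",  # landscape; "auto" and "1024x1536" are also accepted
    output_format="png",
)

# GPT-IMAGE-1 returns images in b64_json format only.
with open("otter.png", "wb") as f:
    f.write(base64.b64decode(generation.data[0].b64_json))
```

The same patch also widens chat message `content` from plain strings to a union that accepts typed text parts, and adds an optional `reasoning_content` field to response messages and streamed deltas. A sketch of the new message shape, using only fields defined in the diff (the model slug is hypothetical):

```python
from gradient import Gradient

client = Gradient()

completion = client.chat.completions.create(
    model="llama3.3-70b-instruct",  # hypothetical slug, not taken from the diff
    messages=[
        {
            "role": "user",
            # Structured content parts: each part is either a str or
            # {"text": ..., "type": "text"}.
            "content": [{"text": "Summarize this patch series.", "type": "text"}],
        }
    ],
)
print(completion.choices[0].message.content)
print(completion.choices[0].message.reasoning_content)  # new optional field
```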
From e23ac14538e17e8d33c33335285389cf13eefe04 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 30 Sep 2025 16:55:09 +0000
Subject: [PATCH 03/11] feat(api): update via SDK Studio
---
.stats.yml | 2 +-
api.md | 1 +
.../types/gpu_droplets/account/__init__.py | 1 +
.../account/key_create_response.py | 33 ++---------------
.../gpu_droplets/account/key_list_response.py | 33 ++---------------
.../account/key_retrieve_response.py | 33 ++---------------
.../account/key_update_response.py | 33 ++---------------
.../types/gpu_droplets/account/ssh_keys.py | 35 +++++++++++++++++++
8 files changed, 50 insertions(+), 121 deletions(-)
create mode 100644 src/gradient/types/gpu_droplets/account/ssh_keys.py
diff --git a/.stats.yml b/.stats.yml
index ac604dc7..267f3b96 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 174
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-3364ccdab713948eb06e9f77fb3b2046d0627ac2c18d389ef60d91bdfbeb2dd7.yml
openapi_spec_hash: 83a3d092965fde776b29b61f785459f9
-config_hash: a8e1b6dcbc8b1126938d320837c5926d
+config_hash: 7c196228f92c7538035b0688dce459f6
diff --git a/api.md b/api.md
index f416429a..5f5af408 100644
--- a/api.md
+++ b/api.md
@@ -771,6 +771,7 @@ Types:
```python
from gradient.types.gpu_droplets.account import (
+ SSHKeys,
KeyCreateResponse,
KeyRetrieveResponse,
KeyUpdateResponse,
diff --git a/src/gradient/types/gpu_droplets/account/__init__.py b/src/gradient/types/gpu_droplets/account/__init__.py
index 4cd64974..2d8a05ae 100644
--- a/src/gradient/types/gpu_droplets/account/__init__.py
+++ b/src/gradient/types/gpu_droplets/account/__init__.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+from .ssh_keys import SSHKeys as SSHKeys
from .key_list_params import KeyListParams as KeyListParams
from .key_create_params import KeyCreateParams as KeyCreateParams
from .key_list_response import KeyListResponse as KeyListResponse
diff --git a/src/gradient/types/gpu_droplets/account/key_create_response.py b/src/gradient/types/gpu_droplets/account/key_create_response.py
index 9fe566ed..5ce63269 100644
--- a/src/gradient/types/gpu_droplets/account/key_create_response.py
+++ b/src/gradient/types/gpu_droplets/account/key_create_response.py
@@ -2,38 +2,11 @@
from typing import Optional
+from .ssh_keys import SSHKeys
from ...._models import BaseModel
-__all__ = ["KeyCreateResponse", "SSHKey"]
-
-
-class SSHKey(BaseModel):
- name: str
- """
- A human-readable display name for this key, used to easily identify the SSH keys
- when they are displayed.
- """
-
- public_key: str
- """The entire public key string that was uploaded.
-
- Embedded into the root user's `authorized_keys` file if you include this key
- during Droplet creation.
- """
-
- id: Optional[int] = None
- """A unique identification number for this key.
-
- Can be used to embed a specific SSH key into a Droplet.
- """
-
- fingerprint: Optional[str] = None
- """
- A unique identifier that differentiates this key from other keys using a format
- that SSH recognizes. The fingerprint is created when the key is added to your
- account.
- """
+__all__ = ["KeyCreateResponse"]
class KeyCreateResponse(BaseModel):
- ssh_key: Optional[SSHKey] = None
+ ssh_key: Optional[SSHKeys] = None
diff --git a/src/gradient/types/gpu_droplets/account/key_list_response.py b/src/gradient/types/gpu_droplets/account/key_list_response.py
index be4c721c..1151043e 100644
--- a/src/gradient/types/gpu_droplets/account/key_list_response.py
+++ b/src/gradient/types/gpu_droplets/account/key_list_response.py
@@ -2,39 +2,12 @@
from typing import List, Optional
+from .ssh_keys import SSHKeys
from ...._models import BaseModel
from ...shared.page_links import PageLinks
from ...shared.meta_properties import MetaProperties
-__all__ = ["KeyListResponse", "SSHKey"]
-
-
-class SSHKey(BaseModel):
- name: str
- """
- A human-readable display name for this key, used to easily identify the SSH keys
- when they are displayed.
- """
-
- public_key: str
- """The entire public key string that was uploaded.
-
- Embedded into the root user's `authorized_keys` file if you include this key
- during Droplet creation.
- """
-
- id: Optional[int] = None
- """A unique identification number for this key.
-
- Can be used to embed a specific SSH key into a Droplet.
- """
-
- fingerprint: Optional[str] = None
- """
- A unique identifier that differentiates this key from other keys using a format
- that SSH recognizes. The fingerprint is created when the key is added to your
- account.
- """
+__all__ = ["KeyListResponse"]
class KeyListResponse(BaseModel):
@@ -43,4 +16,4 @@ class KeyListResponse(BaseModel):
links: Optional[PageLinks] = None
- ssh_keys: Optional[List[SSHKey]] = None
+ ssh_keys: Optional[List[SSHKeys]] = None
diff --git a/src/gradient/types/gpu_droplets/account/key_retrieve_response.py b/src/gradient/types/gpu_droplets/account/key_retrieve_response.py
index 7cd3215e..da6e94d1 100644
--- a/src/gradient/types/gpu_droplets/account/key_retrieve_response.py
+++ b/src/gradient/types/gpu_droplets/account/key_retrieve_response.py
@@ -2,38 +2,11 @@
from typing import Optional
+from .ssh_keys import SSHKeys
from ...._models import BaseModel
-__all__ = ["KeyRetrieveResponse", "SSHKey"]
-
-
-class SSHKey(BaseModel):
- name: str
- """
- A human-readable display name for this key, used to easily identify the SSH keys
- when they are displayed.
- """
-
- public_key: str
- """The entire public key string that was uploaded.
-
- Embedded into the root user's `authorized_keys` file if you include this key
- during Droplet creation.
- """
-
- id: Optional[int] = None
- """A unique identification number for this key.
-
- Can be used to embed a specific SSH key into a Droplet.
- """
-
- fingerprint: Optional[str] = None
- """
- A unique identifier that differentiates this key from other keys using a format
- that SSH recognizes. The fingerprint is created when the key is added to your
- account.
- """
+__all__ = ["KeyRetrieveResponse"]
class KeyRetrieveResponse(BaseModel):
- ssh_key: Optional[SSHKey] = None
+ ssh_key: Optional[SSHKeys] = None
diff --git a/src/gradient/types/gpu_droplets/account/key_update_response.py b/src/gradient/types/gpu_droplets/account/key_update_response.py
index 2821e44a..54b81426 100644
--- a/src/gradient/types/gpu_droplets/account/key_update_response.py
+++ b/src/gradient/types/gpu_droplets/account/key_update_response.py
@@ -2,38 +2,11 @@
from typing import Optional
+from .ssh_keys import SSHKeys
from ...._models import BaseModel
-__all__ = ["KeyUpdateResponse", "SSHKey"]
-
-
-class SSHKey(BaseModel):
- name: str
- """
- A human-readable display name for this key, used to easily identify the SSH keys
- when they are displayed.
- """
-
- public_key: str
- """The entire public key string that was uploaded.
-
- Embedded into the root user's `authorized_keys` file if you include this key
- during Droplet creation.
- """
-
- id: Optional[int] = None
- """A unique identification number for this key.
-
- Can be used to embed a specific SSH key into a Droplet.
- """
-
- fingerprint: Optional[str] = None
- """
- A unique identifier that differentiates this key from other keys using a format
- that SSH recognizes. The fingerprint is created when the key is added to your
- account.
- """
+__all__ = ["KeyUpdateResponse"]
class KeyUpdateResponse(BaseModel):
- ssh_key: Optional[SSHKey] = None
+ ssh_key: Optional[SSHKeys] = None
diff --git a/src/gradient/types/gpu_droplets/account/ssh_keys.py b/src/gradient/types/gpu_droplets/account/ssh_keys.py
new file mode 100644
index 00000000..8112c18a
--- /dev/null
+++ b/src/gradient/types/gpu_droplets/account/ssh_keys.py
@@ -0,0 +1,35 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["SSHKeys"]
+
+
+class SSHKeys(BaseModel):
+ name: str
+ """
+ A human-readable display name for this key, used to easily identify the SSH keys
+ when they are displayed.
+ """
+
+ public_key: str
+ """The entire public key string that was uploaded.
+
+ Embedded into the root user's `authorized_keys` file if you include this key
+ during Droplet creation.
+ """
+
+ id: Optional[int] = None
+ """A unique identification number for this key.
+
+ Can be used to embed a specific SSH key into a Droplet.
+ """
+
+ fingerprint: Optional[str] = None
+ """
+ A unique identifier that differentiates this key from other keys using a format
+ that SSH recognizes. The fingerprint is created when the key is added to your
+ account.
+ """
From fa68fb43e3e175b3dacd62d459b5d8c38b07e367 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 30 Sep 2025 17:32:34 +0000
Subject: [PATCH 04/11] feat(api): update via SDK Studio
---
.stats.yml | 4 ++--
src/gradient/resources/images/generations.py | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 267f3b96..7cf2d7ec 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 174
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-3364ccdab713948eb06e9f77fb3b2046d0627ac2c18d389ef60d91bdfbeb2dd7.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-3c4f564a80bb85ffc7e12ef4645c48a750ec0aba973975c170a9eb263c203c5e.yml
openapi_spec_hash: 83a3d092965fde776b29b61f785459f9
-config_hash: 7c196228f92c7538035b0688dce459f6
+config_hash: 9014a0ec1e08e776f0b3f12f4412b516
diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images/generations.py
index 32701d46..aa660ddd 100644
--- a/src/gradient/resources/images/generations.py
+++ b/src/gradient/resources/images/generations.py
@@ -122,7 +122,7 @@ def create(
return self._post(
"/images/generations"
if self._client._base_url_overridden
- else "https://inference.do-ai.run/v1/images/generations",
+ else f"{self._client.inference_endpoint}/images/generations",
body=maybe_transform(
{
"prompt": prompt,
@@ -246,7 +246,7 @@ async def create(
return await self._post(
"/images/generations"
if self._client._base_url_overridden
- else "https://inference.do-ai.run/v1/images/generations",
+ else f"{self._client.inference_endpoint}/images/generations",
body=await async_maybe_transform(
{
"prompt": prompt,
From 76d29b61ce039f3f270715135ab4d0f444a52b3c Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 30 Sep 2025 17:56:09 +0000
Subject: [PATCH 05/11] feat(api): update via SDK Studio
---
.stats.yml | 4 ++--
src/gradient/resources/images/generations.py | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 7cf2d7ec..8604fb78 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 174
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-3c4f564a80bb85ffc7e12ef4645c48a750ec0aba973975c170a9eb263c203c5e.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-40c154e2fdc4fef9ca1cf8329c29b7102bf324acfc9589571d6f3452d1ca579c.yml
openapi_spec_hash: 83a3d092965fde776b29b61f785459f9
-config_hash: 9014a0ec1e08e776f0b3f12f4412b516
+config_hash: ddab0a714b26eb4a0ed28df928df0b05
diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images/generations.py
index aa660ddd..9aea6bcb 100644
--- a/src/gradient/resources/images/generations.py
+++ b/src/gradient/resources/images/generations.py
@@ -122,7 +122,7 @@ def create(
return self._post(
"/images/generations"
if self._client._base_url_overridden
- else f"{self._client.inference_endpoint}/images/generations",
+ else f"{self._client.inference_endpoint}/v1/images/generations",
body=maybe_transform(
{
"prompt": prompt,
@@ -246,7 +246,7 @@ async def create(
return await self._post(
"/images/generations"
if self._client._base_url_overridden
- else f"{self._client.inference_endpoint}/images/generations",
+ else f"{self._client.inference_endpoint}/v1/images/generations",
body=await async_maybe_transform(
{
"prompt": prompt,
From 4b81c5cf4998707ca2b4eff25845f687e2002602 Mon Sep 17 00:00:00 2001
From: David Meadows
Date: Tue, 30 Sep 2025 18:48:16 -0400
Subject: [PATCH 06/11] chore(client): support model_access_key in image
generations
---
src/gradient/resources/images/generations.py | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images/generations.py
index 9aea6bcb..9dcd2c43 100644
--- a/src/gradient/resources/images/generations.py
+++ b/src/gradient/resources/images/generations.py
@@ -119,6 +119,16 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ if not self._client.model_access_key:
+ raise TypeError(
+ "Could not resolve authentication method. Expected model_access_key to be set for chat completions."
+ )
+ headers = extra_headers or {}
+ headers = {
+ "Authorization": f"Bearer {self._client.model_access_key}",
+ **headers,
+ }
+
return self._post(
"/images/generations"
if self._client._base_url_overridden
From c202e81d81732217a839a0c7c5e56178252362a1 Mon Sep 17 00:00:00 2001
From: David Meadows
Date: Tue, 30 Sep 2025 18:49:54 -0400
Subject: [PATCH 07/11] chore(client): pass model_access_key headers through in
image generations
---
src/gradient/resources/images/generations.py | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images/generations.py
index 9dcd2c43..976c49bd 100644
--- a/src/gradient/resources/images/generations.py
+++ b/src/gradient/resources/images/generations.py
@@ -151,7 +151,7 @@ def create(
generation_create_params.GenerationCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=GenerationCreateResponse,
)
@@ -253,6 +253,16 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ if not self._client.model_access_key:
+ raise TypeError(
+ "Could not resolve authentication method. Expected model_access_key to be set for chat completions."
+ )
+ headers = extra_headers or {}
+ headers = {
+ "Authorization": f"Bearer {self._client.model_access_key}",
+ **headers,
+ }
+
return await self._post(
"/images/generations"
if self._client._base_url_overridden
@@ -275,7 +285,7 @@ async def create(
generation_create_params.GenerationCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=GenerationCreateResponse,
)
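The two patches above gate image generation on client.model_access_key and send it as a Bearer token; caller-supplied extra_headers still take precedence because they are merged in last. A minimal usage sketch, assuming the constructor accepts model_access_key as implied by self._client.model_access_key in the diff (the environment variable name is an assumption):

    import os
    from gradient import Gradient

    client = Gradient(model_access_key=os.environ["GRADIENT_MODEL_ACCESS_KEY"])

    # With no model_access_key on the client, create() raises TypeError
    # before any request is sent.
    generation = client.images.generations.create(
        prompt="A lighthouse at dusk",
    )
    print(generation)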
From 09bf61b5c24b1299a84ea6e8d4df3b88118d9fc3 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 2 Oct 2025 16:42:50 +0000
Subject: [PATCH 08/11] feat(api): update via SDK Studio
---
.stats.yml | 2 +-
api.md | 3 +
src/gradient/resources/images/generations.py | 389 +++++++++++++++++-
src/gradient/types/__init__.py | 3 +
.../types/images/generation_create_params.py | 31 +-
src/gradient/types/shared/__init__.py | 3 +
.../types/shared/image_gen_completed_event.py | 55 +++
.../shared/image_gen_partial_image_event.py | 33 ++
.../types/shared/image_gen_stream_event.py | 14 +
.../api_resources/images/test_generations.py | 126 +++++-
10 files changed, 637 insertions(+), 22 deletions(-)
create mode 100644 src/gradient/types/shared/image_gen_completed_event.py
create mode 100644 src/gradient/types/shared/image_gen_partial_image_event.py
create mode 100644 src/gradient/types/shared/image_gen_stream_event.py
diff --git a/.stats.yml b/.stats.yml
index 8604fb78..58e33183 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 174
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-40c154e2fdc4fef9ca1cf8329c29b7102bf324acfc9589571d6f3452d1ca579c.yml
openapi_spec_hash: 83a3d092965fde776b29b61f785459f9
-config_hash: ddab0a714b26eb4a0ed28df928df0b05
+config_hash: 3752c9f438752ab61938a8921bb3befd
diff --git a/api.md b/api.md
index 5f5af408..25f807c1 100644
--- a/api.md
+++ b/api.md
@@ -18,6 +18,9 @@ from gradient.types import (
GarbageCollection,
GPUInfo,
Image,
+ ImageGenCompletedEvent,
+ ImageGenPartialImageEvent,
+ ImageGenStreamEvent,
Kernel,
MetaProperties,
NetworkV4,
diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images/generations.py
index 976c49bd..8a5cfdb0 100644
--- a/src/gradient/resources/images/generations.py
+++ b/src/gradient/resources/images/generations.py
@@ -3,11 +3,12 @@
from __future__ import annotations
from typing import Optional
+from typing_extensions import Literal, overload
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import maybe_transform, async_maybe_transform
+from ..._utils import required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -16,8 +17,10 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
+from ..._streaming import Stream, AsyncStream
from ..._base_client import make_request_options
from ...types.images import generation_create_params
+from ...types.shared.image_gen_stream_event import ImageGenStreamEvent
from ...types.images.generation_create_response import GenerationCreateResponse
__all__ = ["GenerationsResource", "AsyncGenerationsResource"]
@@ -43,6 +46,7 @@ def with_streaming_response(self) -> GenerationsResourceWithStreamingResponse:
"""
return GenerationsResourceWithStreamingResponse(self)
+ @overload
def create(
self,
*,
@@ -56,7 +60,7 @@ def create(
partial_images: Optional[int] | Omit = omit,
quality: Optional[str] | Omit = omit,
size: Optional[str] | Omit = omit,
- stream: Optional[bool] | Omit = omit,
+ stream: Optional[Literal[False]] | Omit = omit,
user: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -119,6 +123,189 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ ...
+
+ @overload
+ def create(
+ self,
+ *,
+ prompt: str,
+ stream: Literal[True],
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> Stream[ImageGenStreamEvent]:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def create(
+ self,
+ *,
+ prompt: str,
+ stream: bool,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["prompt"], ["prompt", "stream"])
+ def create(
+ self,
+ *,
+ prompt: str,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]:
if not self._client.model_access_key:
raise TypeError(
"Could not resolve authentication method. Expected model_access_key to be set for chat completions."
@@ -148,12 +335,16 @@ def create(
"stream": stream,
"user": user,
},
- generation_create_params.GenerationCreateParams,
+ generation_create_params.GenerationCreateParamsStreaming
+ if stream
+ else generation_create_params.GenerationCreateParamsNonStreaming,
),
options=make_request_options(
extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=GenerationCreateResponse,
+ stream=stream or False,
+ stream_cls=Stream[ImageGenStreamEvent],
)
@@ -177,6 +368,7 @@ def with_streaming_response(self) -> AsyncGenerationsResourceWithStreamingRespon
"""
return AsyncGenerationsResourceWithStreamingResponse(self)
+ @overload
async def create(
self,
*,
@@ -190,7 +382,7 @@ async def create(
partial_images: Optional[int] | Omit = omit,
quality: Optional[str] | Omit = omit,
size: Optional[str] | Omit = omit,
- stream: Optional[bool] | Omit = omit,
+ stream: Optional[Literal[False]] | Omit = omit,
user: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -253,6 +445,189 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ ...
+
+ @overload
+ async def create(
+ self,
+ *,
+ prompt: str,
+ stream: Literal[True],
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncStream[ImageGenStreamEvent]:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def create(
+ self,
+ *,
+ prompt: str,
+ stream: bool,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]:
+ """
+ Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
+ image generation model with automatic prompt optimization and enhanced visual
+ capabilities.
+
+ Args:
+ prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000
+ characters and provides automatic prompt optimization for best results.
+
+ stream: If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
+
+ background:
+ The background setting for the image generation. GPT-IMAGE-1 supports:
+ transparent, opaque, auto.
+
+ model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering
+ the best quality with automatic optimization and enhanced capabilities.
+
+ moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low,
+ auto.
+
+ n: The number of images to generate. GPT-IMAGE-1 only supports n=1.
+
+ output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100.
+
+ output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp,
+ jpeg.
+
+ partial_images: The number of partial image chunks to return during streaming generation. This
+ parameter is optional with a default of 0. When stream=true, this must be
+ greater than 0 to receive progressive updates of the image as it's being
+ generated. Higher values provide more frequent updates but may increase response
+ overhead.
+
+ quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto
+ (automatically select best quality), high, medium, low.
+
+ size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically
+ select best size), 1536x1024 (landscape), 1024x1536 (portrait).
+
+ user: A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["prompt"], ["prompt", "stream"])
+ async def create(
+ self,
+ *,
+ prompt: str,
+ background: Optional[str] | Omit = omit,
+ model: str | Omit = omit,
+ moderation: Optional[str] | Omit = omit,
+ n: Optional[int] | Omit = omit,
+ output_compression: Optional[int] | Omit = omit,
+ output_format: Optional[str] | Omit = omit,
+ partial_images: Optional[int] | Omit = omit,
+ quality: Optional[str] | Omit = omit,
+ size: Optional[str] | Omit = omit,
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
+ user: Optional[str] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]:
if not self._client.model_access_key:
raise TypeError(
"Could not resolve authentication method. Expected model_access_key to be set for chat completions."
@@ -282,12 +657,16 @@ async def create(
"stream": stream,
"user": user,
},
- generation_create_params.GenerationCreateParams,
+ generation_create_params.GenerationCreateParamsStreaming
+ if stream
+ else generation_create_params.GenerationCreateParamsNonStreaming,
),
options=make_request_options(
extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=GenerationCreateResponse,
+ stream=stream or False,
+ stream_cls=AsyncStream[ImageGenStreamEvent],
)
diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py
index d28c4c1d..ad63485e 100644
--- a/src/gradient/types/__init__.py
+++ b/src/gradient/types/__init__.py
@@ -41,8 +41,11 @@
GarbageCollection as GarbageCollection,
FirewallRuleTarget as FirewallRuleTarget,
ChatCompletionChunk as ChatCompletionChunk,
+ ImageGenStreamEvent as ImageGenStreamEvent,
SubscriptionTierBase as SubscriptionTierBase,
+ ImageGenCompletedEvent as ImageGenCompletedEvent,
DropletNextBackupWindow as DropletNextBackupWindow,
+ ImageGenPartialImageEvent as ImageGenPartialImageEvent,
ChatCompletionTokenLogprob as ChatCompletionTokenLogprob,
)
from .api_agent import APIAgent as APIAgent
diff --git a/src/gradient/types/images/generation_create_params.py b/src/gradient/types/images/generation_create_params.py
index c4347548..ec8b672f 100644
--- a/src/gradient/types/images/generation_create_params.py
+++ b/src/gradient/types/images/generation_create_params.py
@@ -2,13 +2,13 @@
from __future__ import annotations
-from typing import Optional
-from typing_extensions import Required, TypedDict
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypedDict
-__all__ = ["GenerationCreateParams"]
+__all__ = ["GenerationCreateParamsBase", "GenerationCreateParamsNonStreaming", "GenerationCreateParamsStreaming"]
-class GenerationCreateParams(TypedDict, total=False):
+class GenerationCreateParamsBase(TypedDict, total=False):
prompt: Required[str]
"""A text description of the desired image(s).
@@ -70,7 +70,15 @@ class GenerationCreateParams(TypedDict, total=False):
(landscape), 1024x1536 (portrait).
"""
- stream: Optional[bool]
+ user: Optional[str]
+ """
+ A unique identifier representing your end-user, which can help DigitalOcean to
+ monitor and detect abuse.
+ """
+
+
+class GenerationCreateParamsNonStreaming(GenerationCreateParamsBase, total=False):
+ stream: Optional[Literal[False]]
"""
If set to true, partial image data will be streamed as the image is being
generated. When streaming, the response will be sent as server-sent events with
@@ -78,8 +86,15 @@ class GenerationCreateParams(TypedDict, total=False):
than 0.
"""
- user: Optional[str]
+
+class GenerationCreateParamsStreaming(GenerationCreateParamsBase):
+ stream: Required[Literal[True]]
"""
- A unique identifier representing your end-user, which can help DigitalOcean to
- monitor and detect abuse.
+ If set to true, partial image data will be streamed as the image is being
+ generated. When streaming, the response will be sent as server-sent events with
+ partial image chunks. When stream is true, partial_images must be greater
+ than 0.
"""
+
+
+GenerationCreateParams = Union[GenerationCreateParamsNonStreaming, GenerationCreateParamsStreaming]
diff --git a/src/gradient/types/shared/__init__.py b/src/gradient/types/shared/__init__.py
index 6d90845f..4fb2986a 100644
--- a/src/gradient/types/shared/__init__.py
+++ b/src/gradient/types/shared/__init__.py
@@ -24,6 +24,9 @@
from .garbage_collection import GarbageCollection as GarbageCollection
from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget
from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .image_gen_stream_event import ImageGenStreamEvent as ImageGenStreamEvent
from .subscription_tier_base import SubscriptionTierBase as SubscriptionTierBase
+from .image_gen_completed_event import ImageGenCompletedEvent as ImageGenCompletedEvent
from .droplet_next_backup_window import DropletNextBackupWindow as DropletNextBackupWindow
from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
+from .image_gen_partial_image_event import ImageGenPartialImageEvent as ImageGenPartialImageEvent
diff --git a/src/gradient/types/shared/image_gen_completed_event.py b/src/gradient/types/shared/image_gen_completed_event.py
new file mode 100644
index 00000000..cbb282e5
--- /dev/null
+++ b/src/gradient/types/shared/image_gen_completed_event.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ImageGenCompletedEvent", "Usage", "UsageInputTokensDetails"]
+
+
+class UsageInputTokensDetails(BaseModel):
+ image_tokens: int
+ """The number of image tokens in the input prompt."""
+
+ text_tokens: int
+ """The number of text tokens in the input prompt."""
+
+
+class Usage(BaseModel):
+ input_tokens: int
+ """The number of tokens (images and text) in the input prompt."""
+
+ input_tokens_details: UsageInputTokensDetails
+ """The input tokens detailed information for the image generation."""
+
+ output_tokens: int
+ """The number of image tokens in the output image."""
+
+ total_tokens: int
+ """The total number of tokens (images and text) used for the image generation."""
+
+
+class ImageGenCompletedEvent(BaseModel):
+ b64_json: str
+ """Base64-encoded image data, suitable for rendering as an image."""
+
+ background: Literal["transparent", "opaque", "auto"]
+ """The background setting for the generated image."""
+
+ created_at: int
+ """The Unix timestamp when the event was created."""
+
+ output_format: Literal["png", "webp", "jpeg"]
+ """The output format for the generated image."""
+
+ quality: Literal["low", "medium", "high", "auto"]
+ """The quality setting for the generated image."""
+
+ size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+ """The size of the generated image."""
+
+ type: Literal["image_generation.completed"]
+ """The type of the event. Always `image_generation.completed`."""
+
+ usage: Usage
+ """For `gpt-image-1` only, the token usage information for the image generation."""
diff --git a/src/gradient/types/shared/image_gen_partial_image_event.py b/src/gradient/types/shared/image_gen_partial_image_event.py
new file mode 100644
index 00000000..4cc704b2
--- /dev/null
+++ b/src/gradient/types/shared/image_gen_partial_image_event.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ImageGenPartialImageEvent"]
+
+
+class ImageGenPartialImageEvent(BaseModel):
+ b64_json: str
+ """Base64-encoded partial image data, suitable for rendering as an image."""
+
+ background: Literal["transparent", "opaque", "auto"]
+ """The background setting for the requested image."""
+
+ created_at: int
+ """The Unix timestamp when the event was created."""
+
+ output_format: Literal["png", "webp", "jpeg"]
+ """The output format for the requested image."""
+
+ partial_image_index: int
+ """0-based index for the partial image (streaming)."""
+
+ quality: Literal["low", "medium", "high", "auto"]
+ """The quality setting for the requested image."""
+
+ size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+ """The size of the requested image."""
+
+ type: Literal["image_generation.partial_image"]
+ """The type of the event. Always `image_generation.partial_image`."""
diff --git a/src/gradient/types/shared/image_gen_stream_event.py b/src/gradient/types/shared/image_gen_stream_event.py
new file mode 100644
index 00000000..30e9571e
--- /dev/null
+++ b/src/gradient/types/shared/image_gen_stream_event.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .image_gen_completed_event import ImageGenCompletedEvent
+from .image_gen_partial_image_event import ImageGenPartialImageEvent
+
+__all__ = ["ImageGenStreamEvent"]
+
+ImageGenStreamEvent: TypeAlias = Annotated[
+ Union[ImageGenPartialImageEvent, ImageGenCompletedEvent], PropertyInfo(discriminator="type")
+]
diff --git a/tests/api_resources/images/test_generations.py b/tests/api_resources/images/test_generations.py
index 2ec20131..c9c67564 100644
--- a/tests/api_resources/images/test_generations.py
+++ b/tests/api_resources/images/test_generations.py
@@ -19,7 +19,7 @@ class TestGenerations:
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_method_create(self, client: Gradient) -> None:
+ def test_method_create_overload_1(self, client: Gradient) -> None:
generation = client.images.generations.create(
prompt="A cute baby sea otter floating on its back in calm blue water",
)
@@ -27,7 +27,7 @@ def test_method_create(self, client: Gradient) -> None:
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_method_create_with_all_params(self, client: Gradient) -> None:
+ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
generation = client.images.generations.create(
prompt="A cute baby sea otter floating on its back in calm blue water",
background="auto",
@@ -46,7 +46,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_raw_response_create(self, client: Gradient) -> None:
+ def test_raw_response_create_overload_1(self, client: Gradient) -> None:
response = client.images.generations.with_raw_response.create(
prompt="A cute baby sea otter floating on its back in calm blue water",
)
@@ -58,7 +58,7 @@ def test_raw_response_create(self, client: Gradient) -> None:
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_streaming_response_create(self, client: Gradient) -> None:
+ def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
with client.images.generations.with_streaming_response.create(
prompt="A cute baby sea otter floating on its back in calm blue water",
) as response:
@@ -70,6 +70,61 @@ def test_streaming_response_create(self, client: Gradient) -> None:
assert cast(Any, response.is_closed) is True
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create_overload_2(self, client: Gradient) -> None:
+ generation_stream = client.images.generations.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
+ )
+ generation_stream.response.close()
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
+ generation_stream = client.images.generations.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
+ background="auto",
+ model="openai-gpt-image-1",
+ moderation="auto",
+ n=1,
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="auto",
+ size="auto",
+ user="user-1234",
+ )
+ generation_stream.response.close()
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
+ response = client.images.generations.with_raw_response.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = response.parse()
+ stream.close()
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
+ with client.images.generations.with_streaming_response.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = response.parse()
+ stream.close()
+
+ assert cast(Any, response.is_closed) is True
+
class TestAsyncGenerations:
parametrize = pytest.mark.parametrize(
@@ -78,7 +133,7 @@ class TestAsyncGenerations:
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_method_create(self, async_client: AsyncGradient) -> None:
+ async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
generation = await async_client.images.generations.create(
prompt="A cute baby sea otter floating on its back in calm blue water",
)
@@ -86,7 +141,7 @@ async def test_method_create(self, async_client: AsyncGradient) -> None:
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
generation = await async_client.images.generations.create(
prompt="A cute baby sea otter floating on its back in calm blue water",
background="auto",
@@ -105,7 +160,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
response = await async_client.images.generations.with_raw_response.create(
prompt="A cute baby sea otter floating on its back in calm blue water",
)
@@ -117,7 +172,7 @@ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
async with async_client.images.generations.with_streaming_response.create(
prompt="A cute baby sea otter floating on its back in calm blue water",
) as response:
@@ -128,3 +183,58 @@ async def test_streaming_response_create(self, async_client: AsyncGradient) -> N
assert_matches_type(GenerationCreateResponse, generation, path=["response"])
assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
+ generation_stream = await async_client.images.generations.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
+ )
+ await generation_stream.response.aclose()
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ generation_stream = await async_client.images.generations.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
+ background="auto",
+ model="openai-gpt-image-1",
+ moderation="auto",
+ n=1,
+ output_compression=100,
+ output_format="png",
+ partial_images=1,
+ quality="auto",
+ size="auto",
+ user="user-1234",
+ )
+ await generation_stream.response.aclose()
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.images.generations.with_raw_response.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
+ )
+
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ stream = await response.parse()
+ await stream.close()
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.images.generations.with_streaming_response.create(
+ prompt="A cute baby sea otter floating on its back in calm blue water",
+ stream=True,
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ stream = await response.parse()
+ await stream.close()
+
+ assert cast(Any, response.is_closed) is True
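With these overloads in place, stream=True returns a Stream[ImageGenStreamEvent] (AsyncStream on the async client) instead of a GenerationCreateResponse, and events are discriminated on their type field. A minimal consumption sketch, assuming a client already configured with a model access key:

    from gradient import Gradient

    client = Gradient(model_access_key="...")  # assumed auth setup

    stream = client.images.generations.create(
        prompt="A cute baby sea otter floating on its back in calm blue water",
        stream=True,
        partial_images=2,  # must be greater than 0 when streaming
    )
    for event in stream:
        if event.type == "image_generation.partial_image":
            print("partial image", event.partial_image_index)
        elif event.type == "image_generation.completed":
            print("completed,", event.usage.total_tokens, "tokens used")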
From 6896be4a1ce33e5f3749b2f107d9b06990b01495 Mon Sep 17 00:00:00 2001
From: bkannappan
Date: Thu, 2 Oct 2025 15:01:37 -0500
Subject: [PATCH 09/11] lint fix
---
src/gradient/_client.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/gradient/_client.py b/src/gradient/_client.py
index 0a33fed9..46d9b28b 100644
--- a/src/gradient/_client.py
+++ b/src/gradient/_client.py
@@ -34,7 +34,8 @@
if TYPE_CHECKING:
from .resources import (
chat,
- agents, images,
+ agents,
+ images,
models,
regions,
databases,
From 20f251223fbe35fbe170b07be41fa6fd2656eed7 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 2 Oct 2025 21:46:20 +0000
Subject: [PATCH 10/11] feat(api): update via SDK Studio
---
.stats.yml | 4 +-
api.md | 2 +
.../resources/knowledge_bases/data_sources.py | 94 +++++++++++++++++++
.../types/knowledge_bases/__init__.py | 6 ++
...ata_source_create_presigned_urls_params.py | 21 +++++
...a_source_create_presigned_urls_response.py | 30 ++++++
.../knowledge_bases/test_data_sources.py | 85 +++++++++++++++++
7 files changed, 240 insertions(+), 2 deletions(-)
create mode 100644 src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
create mode 100644 src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py
diff --git a/.stats.yml b/.stats.yml
index 58e33183..13507cd5 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 174
+configured_endpoints: 175
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-40c154e2fdc4fef9ca1cf8329c29b7102bf324acfc9589571d6f3452d1ca579c.yml
openapi_spec_hash: 83a3d092965fde776b29b61f785459f9
-config_hash: 3752c9f438752ab61938a8921bb3befd
+config_hash: dd3a0f16fb9e072bb63c570b14beccd2
diff --git a/api.md b/api.md
index 25f807c1..6dd6c18e 100644
--- a/api.md
+++ b/api.md
@@ -852,6 +852,7 @@ from gradient.types.knowledge_bases import (
DataSourceCreateResponse,
DataSourceListResponse,
DataSourceDeleteResponse,
+ DataSourceCreatePresignedURLsResponse,
)
```
@@ -860,6 +861,7 @@ Methods:
- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse
- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
+- client.knowledge_bases.data_sources.create_presigned_urls(\*\*params) -> DataSourceCreatePresignedURLsResponse
## IndexingJobs
diff --git a/src/gradient/resources/knowledge_bases/data_sources.py b/src/gradient/resources/knowledge_bases/data_sources.py
index 083ea45f..a00d93f5 100644
--- a/src/gradient/resources/knowledge_bases/data_sources.py
+++ b/src/gradient/resources/knowledge_bases/data_sources.py
@@ -2,6 +2,8 @@
from __future__ import annotations
+from typing import Iterable
+
import httpx
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
@@ -18,6 +20,7 @@
from ...types.knowledge_bases import (
data_source_list_params,
data_source_create_params,
+ data_source_create_presigned_urls_params,
)
from ...types.knowledge_bases.aws_data_source_param import AwsDataSourceParam
from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse
@@ -25,6 +28,7 @@
from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse
from ...types.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam
from ...types.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam
+from ...types.knowledge_bases.data_source_create_presigned_urls_response import DataSourceCreatePresignedURLsResponse
__all__ = ["DataSourcesResource", "AsyncDataSourcesResource"]
@@ -203,6 +207,45 @@ def delete(
cast_to=DataSourceDeleteResponse,
)
+ def create_presigned_urls(
+ self,
+ *,
+ files: Iterable[data_source_create_presigned_urls_params.File] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DataSourceCreatePresignedURLsResponse:
+ """
+ To create presigned URLs for knowledge base data source file upload, send a POST
+ request to `/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls`.
+
+ Args:
+ files: A list of files to generate presigned URLs for.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls",
+ body=maybe_transform(
+ {"files": files}, data_source_create_presigned_urls_params.DataSourceCreatePresignedURLsParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DataSourceCreatePresignedURLsResponse,
+ )
+
class AsyncDataSourcesResource(AsyncAPIResource):
@cached_property
@@ -378,6 +421,45 @@ async def delete(
cast_to=DataSourceDeleteResponse,
)
+ async def create_presigned_urls(
+ self,
+ *,
+ files: Iterable[data_source_create_presigned_urls_params.File] | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> DataSourceCreatePresignedURLsResponse:
+ """
+ To create presigned URLs for knowledge base data source file upload, send a POST
+ request to `/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls`.
+
+ Args:
+ files: A list of files to generate presigned URLs for.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls",
+ body=await async_maybe_transform(
+ {"files": files}, data_source_create_presigned_urls_params.DataSourceCreatePresignedURLsParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=DataSourceCreatePresignedURLsResponse,
+ )
+
class DataSourcesResourceWithRawResponse:
def __init__(self, data_sources: DataSourcesResource) -> None:
@@ -392,6 +474,9 @@ def __init__(self, data_sources: DataSourcesResource) -> None:
self.delete = to_raw_response_wrapper(
data_sources.delete,
)
+ self.create_presigned_urls = to_raw_response_wrapper(
+ data_sources.create_presigned_urls,
+ )
class AsyncDataSourcesResourceWithRawResponse:
@@ -407,6 +492,9 @@ def __init__(self, data_sources: AsyncDataSourcesResource) -> None:
self.delete = async_to_raw_response_wrapper(
data_sources.delete,
)
+ self.create_presigned_urls = async_to_raw_response_wrapper(
+ data_sources.create_presigned_urls,
+ )
class DataSourcesResourceWithStreamingResponse:
@@ -422,6 +510,9 @@ def __init__(self, data_sources: DataSourcesResource) -> None:
self.delete = to_streamed_response_wrapper(
data_sources.delete,
)
+ self.create_presigned_urls = to_streamed_response_wrapper(
+ data_sources.create_presigned_urls,
+ )
class AsyncDataSourcesResourceWithStreamingResponse:
@@ -437,3 +528,6 @@ def __init__(self, data_sources: AsyncDataSourcesResource) -> None:
self.delete = async_to_streamed_response_wrapper(
data_sources.delete,
)
+ self.create_presigned_urls = async_to_streamed_response_wrapper(
+ data_sources.create_presigned_urls,
+ )
diff --git a/src/gradient/types/knowledge_bases/__init__.py b/src/gradient/types/knowledge_bases/__init__.py
index b23053f2..cab865fa 100644
--- a/src/gradient/types/knowledge_bases/__init__.py
+++ b/src/gradient/types/knowledge_bases/__init__.py
@@ -24,6 +24,12 @@
from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam
from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
+from .data_source_create_presigned_urls_params import (
+ DataSourceCreatePresignedURLsParams as DataSourceCreatePresignedURLsParams,
+)
+from .data_source_create_presigned_urls_response import (
+ DataSourceCreatePresignedURLsResponse as DataSourceCreatePresignedURLsResponse,
+)
from .indexing_job_retrieve_data_sources_response import (
IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
)
diff --git a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
new file mode 100644
index 00000000..253cbce7
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import TypedDict
+
+__all__ = ["DataSourceCreatePresignedURLsParams", "File"]
+
+
+class DataSourceCreatePresignedURLsParams(TypedDict, total=False):
+ files: Iterable[File]
+ """A list of files to generate presigned URLs for."""
+
+
+class File(TypedDict, total=False):
+ file_name: str
+ """Local filename"""
+
+    file_size: str
+    """The size of the file in bytes, encoded as a string."""
diff --git a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py
new file mode 100644
index 00000000..c3d172d7
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+
+__all__ = ["DataSourceCreatePresignedURLsResponse", "Upload"]
+
+
+class Upload(BaseModel):
+ expires_at: Optional[datetime] = None
+ """The time the url expires at."""
+
+ object_key: Optional[str] = None
+ """The unique object key to store the file as."""
+
+ original_file_name: Optional[str] = None
+ """The original file name."""
+
+ presigned_url: Optional[str] = None
+ """The actual presigned URL the client can use to upload the file directly."""
+
+
+class DataSourceCreatePresignedURLsResponse(BaseModel):
+ request_id: Optional[str] = None
+ """The ID generated for the request for Presigned URLs."""
+
+ uploads: Optional[List[Upload]] = None
+ """A list of generated presigned URLs and object keys, one per file."""
diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py
index bd7158d2..4214f880 100644
--- a/tests/api_resources/knowledge_bases/test_data_sources.py
+++ b/tests/api_resources/knowledge_bases/test_data_sources.py
@@ -13,6 +13,7 @@
DataSourceListResponse,
DataSourceCreateResponse,
DataSourceDeleteResponse,
+ DataSourceCreatePresignedURLsResponse,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -195,6 +196,47 @@ def test_path_params_delete(self, client: Gradient) -> None:
knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
)
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create_presigned_urls(self, client: Gradient) -> None:
+ data_source = client.knowledge_bases.data_sources.create_presigned_urls()
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create_presigned_urls_with_all_params(self, client: Gradient) -> None:
+ data_source = client.knowledge_bases.data_sources.create_presigned_urls(
+ files=[
+ {
+ "file_name": "example name",
+ "file_size": "file_size",
+ }
+ ],
+ )
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_create_presigned_urls(self, client: Gradient) -> None:
+ response = client.knowledge_bases.data_sources.with_raw_response.create_presigned_urls()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ data_source = response.parse()
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_create_presigned_urls(self, client: Gradient) -> None:
+ with client.knowledge_bases.data_sources.with_streaming_response.create_presigned_urls() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ data_source = response.parse()
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
class TestAsyncDataSources:
parametrize = pytest.mark.parametrize(
@@ -374,3 +416,46 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
data_source_uuid="",
knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"',
)
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create_presigned_urls(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.create_presigned_urls()
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create_presigned_urls_with_all_params(self, async_client: AsyncGradient) -> None:
+ data_source = await async_client.knowledge_bases.data_sources.create_presigned_urls(
+ files=[
+ {
+ "file_name": "example name",
+ "file_size": "file_size",
+ }
+ ],
+ )
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_create_presigned_urls(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.data_sources.with_raw_response.create_presigned_urls()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ data_source = await response.parse()
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_create_presigned_urls(self, async_client: AsyncGradient) -> None:
+ async with (
+ async_client.knowledge_bases.data_sources.with_streaming_response.create_presigned_urls()
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ data_source = await response.parse()
+ assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
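For context (not part of the patch): the raw and streaming wrappers exercised above differ only in when the body is read — the raw variant reads and closes it up front, while the streaming variant holds the connection open until the `with` block exits, exactly as the `is_closed` assertions show. A minimal sketch of the raw variant outside the test harness, assuming an already configured client:

    response = client.knowledge_bases.data_sources.with_raw_response.create_presigned_urls()
    data_source = response.parse()  # -> DataSourceCreatePresignedURLsResponse
    print(data_source.request_id, data_source.uploads)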
From c36154777796b496adf119abc40c41807f963194 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 3 Oct 2025 15:04:45 +0000
Subject: [PATCH 11/11] release: 3.1.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 24 ++++++++++++++++++++++++
pyproject.toml | 2 +-
src/gradient/_version.py | 2 +-
4 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index a1304a17..e0dc5001 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "3.0.2"
+ ".": "3.1.0"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 099062d4..0fed586c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,29 @@
# Changelog
+## 3.1.0 (2025-10-03)
+
+Full Changelog: [v3.0.2...v3.1.0](https://github.com/digitalocean/gradient-python/compare/v3.0.2...v3.1.0)
+
+### Features
+
+* **api:** update via SDK Studio ([20f2512](https://github.com/digitalocean/gradient-python/commit/20f251223fbe35fbe170b07be41fa6fd2656eed7))
+* **api:** update via SDK Studio ([09bf61b](https://github.com/digitalocean/gradient-python/commit/09bf61b5c24b1299a84ea6e8d4df3b88118d9fc3))
+* **api:** update via SDK Studio ([76d29b6](https://github.com/digitalocean/gradient-python/commit/76d29b61ce039f3f270715135ab4d0f444a52b3c))
+* **api:** update via SDK Studio ([fa68fb4](https://github.com/digitalocean/gradient-python/commit/fa68fb43e3e175b3dacd62d459b5d8c38b07e367))
+* **api:** update via SDK Studio ([e23ac14](https://github.com/digitalocean/gradient-python/commit/e23ac14538e17e8d33c33335285389cf13eefe04))
+* **api:** update via SDK Studio ([a5f6aa6](https://github.com/digitalocean/gradient-python/commit/a5f6aa656021a9aaa6a2e82dfa251f87f0096de0))
+* **api:** update via SDK Studio ([b900d76](https://github.com/digitalocean/gradient-python/commit/b900d769ba4a290523f17d2d69de850366c961b6))
+
+
+### Chores
+
+* **client:** support model_access_key in image generations ([4b81c5c](https://github.com/digitalocean/gradient-python/commit/4b81c5cf4998707ca2b4eff25845f687e2002602))
+* **client:** support model_access_key in image generations for real ([c202e81](https://github.com/digitalocean/gradient-python/commit/c202e81d81732217a839a0c7c5e56178252362a1))
+* fix bash quoting ([d92383d](https://github.com/digitalocean/gradient-python/commit/d92383da134a32cb0ae6f5a1c3044ec4947deacc))
+* quote bash variables ([6673263](https://github.com/digitalocean/gradient-python/commit/6673263dbdee2ae77eabd2f6d88cf61921f9e63c))
+* remove preview warning ([e4cf6a8](https://github.com/digitalocean/gradient-python/commit/e4cf6a8b5b37acf483be7301aa0a661a5db43a05))
+* update actions versions ([7056460](https://github.com/digitalocean/gradient-python/commit/7056460cef8093329da4ed24f2e7bd286213e90d))
+
## 3.0.2 (2025-09-24)
Full Changelog: [v3.0.1...v3.0.2](https://github.com/digitalocean/gradient-python/compare/v3.0.1...v3.0.2)
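For context (not part of the patch): the two `model_access_key` chore entries above refer to the image-generation endpoint added earlier in this series. A hedged sketch of passing the key through — the method location follows the usual resource layout (`client.images.generations`), but the exact signature, including the `prompt` parameter, is an assumption:

    generation = client.images.generations.create(
        prompt="a watercolor lighthouse at dusk",  # assumed required parameter
        model_access_key="my-model-access-key",    # per the chore entries above
    )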
diff --git a/pyproject.toml b/pyproject.toml
index 62741433..e2ea2e5c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "gradient"
-version = "3.0.2"
+version = "3.1.0"
description = "The official Python library for the Gradient API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/gradient/_version.py b/src/gradient/_version.py
index bd32dfe9..69cb2fcb 100644
--- a/src/gradient/_version.py
+++ b/src/gradient/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "gradient"
-__version__ = "3.0.2" # x-release-please-version
+__version__ = "3.1.0" # x-release-please-version