diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 1f73031b..ff1c7af5 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "3.2.0"
+ ".": "3.3.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 13507cd5..60c46674 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 175
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-40c154e2fdc4fef9ca1cf8329c29b7102bf324acfc9589571d6f3452d1ca579c.yml
openapi_spec_hash: 83a3d092965fde776b29b61f785459f9
-config_hash: dd3a0f16fb9e072bb63c570b14beccd2
+config_hash: 8497af1695ff361853c745dd869dc6b9
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ed2eb2db..1f6a3702 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,14 @@
# Changelog
+## 3.3.0 (2025-10-07)
+
+Full Changelog: [v3.2.0...v3.3.0](https://github.com/digitalocean/gradient-python/compare/v3.2.0...v3.3.0)
+
+### Features
+
+* **api:** Images generations - openai ([e5a309e](https://github.com/digitalocean/gradient-python/commit/e5a309e46bf05846c580f425e6fa23f323138a4d))
+* **api:** update via SDK Studio ([c2bf693](https://github.com/digitalocean/gradient-python/commit/c2bf693d233830dafdfc2aa7f74e2ced2e8d81a0))
+
## 3.2.0 (2025-10-06)
Full Changelog: [v3.1.0...v3.2.0](https://github.com/digitalocean/gradient-python/compare/v3.1.0...v3.2.0)
diff --git a/api.md b/api.md
index 6dd6c18e..a2325441 100644
--- a/api.md
+++ b/api.md
@@ -388,17 +388,15 @@ Methods:
# Images
-## Generations
-
Types:
```python
-from gradient.types.images import GenerationCreateResponse
+from gradient.types import ImageGenerateResponse
```
Methods:
-- client.images.generations.create(\*\*params) -> GenerationCreateResponse
+- client.images.generate(\*\*params) -> ImageGenerateResponse
# GPUDroplets
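
The api.md hunk above is the user-facing change in this release: the nested `client.images.generations.create(**params)` method is flattened to `client.images.generate(**params)` and now returns `ImageGenerateResponse` from `gradient.types`. A minimal migration sketch, assuming the client is constructed with a `model_access_key` (placeholder value), which the resource code below requires; the prompt string is reused from the test fixtures in this diff:

```python
from gradient import Gradient

client = Gradient(model_access_key="...")  # placeholder credential

# 3.2.0 and earlier:
#   image = client.images.generations.create(prompt="...")
# 3.3.0:
image = client.images.generate(
    prompt="A cute baby sea otter floating on its back in calm blue water",
)
print(image.created)  # Unix timestamp (seconds) from ImageGenerateResponse
```
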
diff --git a/pyproject.toml b/pyproject.toml
index dade45c8..d2489702 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "gradient"
-version = "3.2.0"
+version = "3.3.0"
description = "The official Python library for the Gradient API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/gradient/_client.py b/src/gradient/_client.py
index 46d9b28b..4f6c3796 100644
--- a/src/gradient/_client.py
+++ b/src/gradient/_client.py
@@ -43,6 +43,7 @@
gpu_droplets,
knowledge_bases,
)
+ from .resources.images import ImagesResource, AsyncImagesResource
from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.gpu_droplets import (
@@ -50,7 +51,6 @@
AsyncGPUDropletsResource,
)
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
- from .resources.images.images import ImagesResource, AsyncImagesResource
from .resources.models.models import ModelsResource, AsyncModelsResource
from .resources.databases.databases import DatabasesResource, AsyncDatabasesResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
diff --git a/src/gradient/_version.py b/src/gradient/_version.py
index 6607400d..f90aaf96 100644
--- a/src/gradient/_version.py
+++ b/src/gradient/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "gradient"
-__version__ = "3.2.0" # x-release-please-version
+__version__ = "3.3.0" # x-release-please-version
diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images.py
similarity index 90%
rename from src/gradient/resources/images/generations.py
rename to src/gradient/resources/images.py
index 8a5cfdb0..14bbfeaa 100644
--- a/src/gradient/resources/images/generations.py
+++ b/src/gradient/resources/images.py
@@ -7,47 +7,47 @@
import httpx
-from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
-from ..._utils import required_args, maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
+from ..types import image_generate_params
+from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from .._utils import required_args, maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
to_raw_response_wrapper,
to_streamed_response_wrapper,
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ..._streaming import Stream, AsyncStream
-from ..._base_client import make_request_options
-from ...types.images import generation_create_params
-from ...types.shared.image_gen_stream_event import ImageGenStreamEvent
-from ...types.images.generation_create_response import GenerationCreateResponse
+from .._streaming import Stream, AsyncStream
+from .._base_client import make_request_options
+from ..types.image_generate_response import ImageGenerateResponse
+from ..types.shared.image_gen_stream_event import ImageGenStreamEvent
-__all__ = ["GenerationsResource", "AsyncGenerationsResource"]
+__all__ = ["ImagesResource", "AsyncImagesResource"]
-class GenerationsResource(SyncAPIResource):
+class ImagesResource(SyncAPIResource):
@cached_property
- def with_raw_response(self) -> GenerationsResourceWithRawResponse:
+ def with_raw_response(self) -> ImagesResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
"""
- return GenerationsResourceWithRawResponse(self)
+ return ImagesResourceWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> GenerationsResourceWithStreamingResponse:
+ def with_streaming_response(self) -> ImagesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
"""
- return GenerationsResourceWithStreamingResponse(self)
+ return ImagesResourceWithStreamingResponse(self)
@overload
- def create(
+ def generate(
self,
*,
prompt: str,
@@ -68,7 +68,7 @@ def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> GenerationCreateResponse:
+ ) -> ImageGenerateResponse:
"""
Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
image generation model with automatic prompt optimization and enhanced visual
@@ -126,7 +126,7 @@ def create(
...
@overload
- def create(
+ def generate(
self,
*,
prompt: str,
@@ -205,7 +205,7 @@ def create(
...
@overload
- def create(
+ def generate(
self,
*,
prompt: str,
@@ -226,7 +226,7 @@ def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]:
+ ) -> ImageGenerateResponse | Stream[ImageGenStreamEvent]:
"""
Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
image generation model with automatic prompt optimization and enhanced visual
@@ -284,7 +284,7 @@ def create(
...
@required_args(["prompt"], ["prompt", "stream"])
- def create(
+ def generate(
self,
*,
prompt: str,
@@ -305,7 +305,7 @@ def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]:
+ ) -> ImageGenerateResponse | Stream[ImageGenStreamEvent]:
if not self._client.model_access_key:
raise TypeError(
"Could not resolve authentication method. Expected model_access_key to be set for chat completions."
@@ -335,41 +335,41 @@ def create(
"stream": stream,
"user": user,
},
- generation_create_params.GenerationCreateParamsStreaming
+ image_generate_params.ImageGenerateParamsStreaming
if stream
- else generation_create_params.GenerationCreateParamsNonStreaming,
+ else image_generate_params.ImageGenerateParamsNonStreaming,
),
options=make_request_options(
extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=GenerationCreateResponse,
+ cast_to=ImageGenerateResponse,
stream=stream or False,
stream_cls=Stream[ImageGenStreamEvent],
)
-class AsyncGenerationsResource(AsyncAPIResource):
+class AsyncImagesResource(AsyncAPIResource):
@cached_property
- def with_raw_response(self) -> AsyncGenerationsResourceWithRawResponse:
+ def with_raw_response(self) -> AsyncImagesResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
"""
- return AsyncGenerationsResourceWithRawResponse(self)
+ return AsyncImagesResourceWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> AsyncGenerationsResourceWithStreamingResponse:
+ def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
"""
- return AsyncGenerationsResourceWithStreamingResponse(self)
+ return AsyncImagesResourceWithStreamingResponse(self)
@overload
- async def create(
+ async def generate(
self,
*,
prompt: str,
@@ -390,7 +390,7 @@ async def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> GenerationCreateResponse:
+ ) -> ImageGenerateResponse:
"""
Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
image generation model with automatic prompt optimization and enhanced visual
@@ -448,7 +448,7 @@ async def create(
...
@overload
- async def create(
+ async def generate(
self,
*,
prompt: str,
@@ -527,7 +527,7 @@ async def create(
...
@overload
- async def create(
+ async def generate(
self,
*,
prompt: str,
@@ -548,7 +548,7 @@ async def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]:
+ ) -> ImageGenerateResponse | AsyncStream[ImageGenStreamEvent]:
"""
Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest
image generation model with automatic prompt optimization and enhanced visual
@@ -606,7 +606,7 @@ async def create(
...
@required_args(["prompt"], ["prompt", "stream"])
- async def create(
+ async def generate(
self,
*,
prompt: str,
@@ -627,7 +627,7 @@ async def create(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]:
+ ) -> ImageGenerateResponse | AsyncStream[ImageGenStreamEvent]:
if not self._client.model_access_key:
raise TypeError(
"Could not resolve authentication method. Expected model_access_key to be set for chat completions."
@@ -637,7 +637,6 @@ async def create(
"Authorization": f"Bearer {self._client.model_access_key}",
**headers,
}
-
return await self._post(
"/images/generations"
if self._client._base_url_overridden
@@ -657,50 +656,50 @@ async def create(
"stream": stream,
"user": user,
},
- generation_create_params.GenerationCreateParamsStreaming
+ image_generate_params.ImageGenerateParamsStreaming
if stream
- else generation_create_params.GenerationCreateParamsNonStreaming,
+ else image_generate_params.ImageGenerateParamsNonStreaming,
),
options=make_request_options(
extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=GenerationCreateResponse,
+ cast_to=ImageGenerateResponse,
stream=stream or False,
stream_cls=AsyncStream[ImageGenStreamEvent],
)
-class GenerationsResourceWithRawResponse:
- def __init__(self, generations: GenerationsResource) -> None:
- self._generations = generations
+class ImagesResourceWithRawResponse:
+ def __init__(self, images: ImagesResource) -> None:
+ self._images = images
- self.create = to_raw_response_wrapper(
- generations.create,
+ self.generate = to_raw_response_wrapper(
+ images.generate,
)
-class AsyncGenerationsResourceWithRawResponse:
- def __init__(self, generations: AsyncGenerationsResource) -> None:
- self._generations = generations
+class AsyncImagesResourceWithRawResponse:
+ def __init__(self, images: AsyncImagesResource) -> None:
+ self._images = images
- self.create = async_to_raw_response_wrapper(
- generations.create,
+ self.generate = async_to_raw_response_wrapper(
+ images.generate,
)
-class GenerationsResourceWithStreamingResponse:
- def __init__(self, generations: GenerationsResource) -> None:
- self._generations = generations
+class ImagesResourceWithStreamingResponse:
+ def __init__(self, images: ImagesResource) -> None:
+ self._images = images
- self.create = to_streamed_response_wrapper(
- generations.create,
+ self.generate = to_streamed_response_wrapper(
+ images.generate,
)
-class AsyncGenerationsResourceWithStreamingResponse:
- def __init__(self, generations: AsyncGenerationsResource) -> None:
- self._generations = generations
+class AsyncImagesResourceWithStreamingResponse:
+ def __init__(self, images: AsyncImagesResource) -> None:
+ self._images = images
- self.create = async_to_streamed_response_wrapper(
- generations.create,
+ self.generate = async_to_streamed_response_wrapper(
+ images.generate,
)
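
The renamed `generate()` method keeps the same streaming overloads as the old `create()`: passing `stream=True` selects `ImageGenerateParamsStreaming` and returns a `Stream[ImageGenStreamEvent]` (an `AsyncStream` on the async client) instead of an `ImageGenerateResponse`. A rough consumption sketch, assuming the stream is iterable in the usual Stainless fashion; only identifiers shown in this diff are used, and the explicit `response.close()` mirrors the rewritten tests:

```python
from gradient import Gradient

client = Gradient(model_access_key="...")  # placeholder credential

image_stream = client.images.generate(
    prompt="A cute baby sea otter floating on its back in calm blue water",
    stream=True,
)
for event in image_stream:  # each item is an ImageGenStreamEvent
    print(type(event).__name__)
image_stream.response.close()  # same cleanup the tests perform
```
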
diff --git a/src/gradient/resources/images/__init__.py b/src/gradient/resources/images/__init__.py
deleted file mode 100644
index cf187f1d..00000000
--- a/src/gradient/resources/images/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .images import (
- ImagesResource,
- AsyncImagesResource,
- ImagesResourceWithRawResponse,
- AsyncImagesResourceWithRawResponse,
- ImagesResourceWithStreamingResponse,
- AsyncImagesResourceWithStreamingResponse,
-)
-from .generations import (
- GenerationsResource,
- AsyncGenerationsResource,
- GenerationsResourceWithRawResponse,
- AsyncGenerationsResourceWithRawResponse,
- GenerationsResourceWithStreamingResponse,
- AsyncGenerationsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "GenerationsResource",
- "AsyncGenerationsResource",
- "GenerationsResourceWithRawResponse",
- "AsyncGenerationsResourceWithRawResponse",
- "GenerationsResourceWithStreamingResponse",
- "AsyncGenerationsResourceWithStreamingResponse",
- "ImagesResource",
- "AsyncImagesResource",
- "ImagesResourceWithRawResponse",
- "AsyncImagesResourceWithRawResponse",
- "ImagesResourceWithStreamingResponse",
- "AsyncImagesResourceWithStreamingResponse",
-]
diff --git a/src/gradient/resources/images/images.py b/src/gradient/resources/images/images.py
deleted file mode 100644
index 37e7290f..00000000
--- a/src/gradient/resources/images/images.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from .generations import (
- GenerationsResource,
- AsyncGenerationsResource,
- GenerationsResourceWithRawResponse,
- AsyncGenerationsResourceWithRawResponse,
- GenerationsResourceWithStreamingResponse,
- AsyncGenerationsResourceWithStreamingResponse,
-)
-
-__all__ = ["ImagesResource", "AsyncImagesResource"]
-
-
-class ImagesResource(SyncAPIResource):
- @cached_property
- def generations(self) -> GenerationsResource:
- return GenerationsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> ImagesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
- """
- return ImagesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ImagesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
- """
- return ImagesResourceWithStreamingResponse(self)
-
-
-class AsyncImagesResource(AsyncAPIResource):
- @cached_property
- def generations(self) -> AsyncGenerationsResource:
- return AsyncGenerationsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncImagesResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
- """
- return AsyncImagesResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
- """
- return AsyncImagesResourceWithStreamingResponse(self)
-
-
-class ImagesResourceWithRawResponse:
- def __init__(self, images: ImagesResource) -> None:
- self._images = images
-
- @cached_property
- def generations(self) -> GenerationsResourceWithRawResponse:
- return GenerationsResourceWithRawResponse(self._images.generations)
-
-
-class AsyncImagesResourceWithRawResponse:
- def __init__(self, images: AsyncImagesResource) -> None:
- self._images = images
-
- @cached_property
- def generations(self) -> AsyncGenerationsResourceWithRawResponse:
- return AsyncGenerationsResourceWithRawResponse(self._images.generations)
-
-
-class ImagesResourceWithStreamingResponse:
- def __init__(self, images: ImagesResource) -> None:
- self._images = images
-
- @cached_property
- def generations(self) -> GenerationsResourceWithStreamingResponse:
- return GenerationsResourceWithStreamingResponse(self._images.generations)
-
-
-class AsyncImagesResourceWithStreamingResponse:
- def __init__(self, images: AsyncImagesResource) -> None:
- self._images = images
-
- @cached_property
- def generations(self) -> AsyncGenerationsResourceWithStreamingResponse:
- return AsyncGenerationsResourceWithStreamingResponse(self._images.generations)
diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py
index ad63485e..1299779c 100644
--- a/src/gradient/types/__init__.py
+++ b/src/gradient/types/__init__.py
@@ -68,10 +68,12 @@
from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse
from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse
from .droplet_backup_policy import DropletBackupPolicy as DropletBackupPolicy
+from .image_generate_params import ImageGenerateParams as ImageGenerateParams
from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo
from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse
from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
from .gpu_droplet_list_params import GPUDropletListParams as GPUDropletListParams
+from .image_generate_response import ImageGenerateResponse as ImageGenerateResponse
from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams
from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse
diff --git a/src/gradient/types/images/generation_create_params.py b/src/gradient/types/image_generate_params.py
similarity index 86%
rename from src/gradient/types/images/generation_create_params.py
rename to src/gradient/types/image_generate_params.py
index ec8b672f..42e6144a 100644
--- a/src/gradient/types/images/generation_create_params.py
+++ b/src/gradient/types/image_generate_params.py
@@ -5,10 +5,10 @@
from typing import Union, Optional
from typing_extensions import Literal, Required, TypedDict
-__all__ = ["GenerationCreateParamsBase", "GenerationCreateParamsNonStreaming", "GenerationCreateParamsStreaming"]
+__all__ = ["ImageGenerateParamsBase", "ImageGenerateParamsNonStreaming", "ImageGenerateParamsStreaming"]
-class GenerationCreateParamsBase(TypedDict, total=False):
+class ImageGenerateParamsBase(TypedDict, total=False):
prompt: Required[str]
"""A text description of the desired image(s).
@@ -77,7 +77,7 @@ class GenerationCreateParamsBase(TypedDict, total=False):
"""
-class GenerationCreateParamsNonStreaming(GenerationCreateParamsBase, total=False):
+class ImageGenerateParamsNonStreaming(ImageGenerateParamsBase, total=False):
stream: Optional[Literal[False]]
"""
If set to true, partial image data will be streamed as the image is being
@@ -87,7 +87,7 @@ class GenerationCreateParamsNonStreaming(GenerationCreateParamsBase, total=False
"""
-class GenerationCreateParamsStreaming(GenerationCreateParamsBase):
+class ImageGenerateParamsStreaming(ImageGenerateParamsBase):
stream: Required[Literal[True]]
"""
If set to true, partial image data will be streamed as the image is being
@@ -97,4 +97,4 @@ class GenerationCreateParamsStreaming(GenerationCreateParamsBase):
"""
-GenerationCreateParams = Union[GenerationCreateParamsNonStreaming, GenerationCreateParamsStreaming]
+ImageGenerateParams = Union[ImageGenerateParamsNonStreaming, ImageGenerateParamsStreaming]
diff --git a/src/gradient/types/images/generation_create_response.py b/src/gradient/types/image_generate_response.py
similarity index 90%
rename from src/gradient/types/images/generation_create_response.py
rename to src/gradient/types/image_generate_response.py
index 32757c06..5f97697c 100644
--- a/src/gradient/types/images/generation_create_response.py
+++ b/src/gradient/types/image_generate_response.py
@@ -2,9 +2,9 @@
from typing import List, Optional
-from ..._models import BaseModel
+from .._models import BaseModel
-__all__ = ["GenerationCreateResponse", "Data", "Usage", "UsageInputTokensDetails"]
+__all__ = ["ImageGenerateResponse", "Data", "Usage", "UsageInputTokensDetails"]
class Data(BaseModel):
@@ -40,7 +40,7 @@ class Usage(BaseModel):
"""Number of tokens in the generated output"""
-class GenerationCreateResponse(BaseModel):
+class ImageGenerateResponse(BaseModel):
created: int
"""The Unix timestamp (in seconds) of when the images were created"""
diff --git a/src/gradient/types/images/__init__.py b/src/gradient/types/images/__init__.py
deleted file mode 100644
index 29634ec1..00000000
--- a/src/gradient/types/images/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .generation_create_params import GenerationCreateParams as GenerationCreateParams
-from .generation_create_response import GenerationCreateResponse as GenerationCreateResponse
diff --git a/tests/api_resources/images/__init__.py b/tests/api_resources/images/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/images/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/images/test_generations.py b/tests/api_resources/test_images.py
similarity index 62%
rename from tests/api_resources/images/test_generations.py
rename to tests/api_resources/test_images.py
index c9c67564..47428d02 100644
--- a/tests/api_resources/images/test_generations.py
+++ b/tests/api_resources/test_images.py
@@ -9,26 +9,26 @@
from gradient import Gradient, AsyncGradient
from tests.utils import assert_matches_type
-from gradient.types.images import GenerationCreateResponse
+from gradient.types import ImageGenerateResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-class TestGenerations:
+class TestImages:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_method_create_overload_1(self, client: Gradient) -> None:
- generation = client.images.generations.create(
+ def test_method_generate_overload_1(self, client: Gradient) -> None:
+ image = client.images.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
)
- assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
- generation = client.images.generations.create(
+ def test_method_generate_with_all_params_overload_1(self, client: Gradient) -> None:
+ image = client.images.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
background="auto",
model="openai-gpt-image-1",
@@ -42,47 +42,47 @@ def test_method_create_with_all_params_overload_1(self, client: Gradient) -> Non
stream=False,
user="user-1234",
)
- assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_raw_response_create_overload_1(self, client: Gradient) -> None:
- response = client.images.generations.with_raw_response.create(
+ def test_raw_response_generate_overload_1(self, client: Gradient) -> None:
+ response = client.images.with_raw_response.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- generation = response.parse()
- assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+ image = response.parse()
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
- with client.images.generations.with_streaming_response.create(
+ def test_streaming_response_generate_overload_1(self, client: Gradient) -> None:
+ with client.images.with_streaming_response.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- generation = response.parse()
- assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+ image = response.parse()
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
assert cast(Any, response.is_closed) is True
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_method_create_overload_2(self, client: Gradient) -> None:
- generation_stream = client.images.generations.create(
+ def test_method_generate_overload_2(self, client: Gradient) -> None:
+ image_stream = client.images.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
stream=True,
)
- generation_stream.response.close()
+ image_stream.response.close()
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None:
- generation_stream = client.images.generations.create(
+ def test_method_generate_with_all_params_overload_2(self, client: Gradient) -> None:
+ image_stream = client.images.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
stream=True,
background="auto",
@@ -96,12 +96,12 @@ def test_method_create_with_all_params_overload_2(self, client: Gradient) -> Non
size="auto",
user="user-1234",
)
- generation_stream.response.close()
+ image_stream.response.close()
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_raw_response_create_overload_2(self, client: Gradient) -> None:
- response = client.images.generations.with_raw_response.create(
+ def test_raw_response_generate_overload_2(self, client: Gradient) -> None:
+ response = client.images.with_raw_response.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
stream=True,
)
@@ -112,8 +112,8 @@ def test_raw_response_create_overload_2(self, client: Gradient) -> None:
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
- with client.images.generations.with_streaming_response.create(
+ def test_streaming_response_generate_overload_2(self, client: Gradient) -> None:
+ with client.images.with_streaming_response.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
stream=True,
) as response:
@@ -126,23 +126,23 @@ def test_streaming_response_create_overload_2(self, client: Gradient) -> None:
assert cast(Any, response.is_closed) is True
-class TestAsyncGenerations:
+class TestAsyncImages:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None:
- generation = await async_client.images.generations.create(
+ async def test_method_generate_overload_1(self, async_client: AsyncGradient) -> None:
+ image = await async_client.images.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
)
- assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
- generation = await async_client.images.generations.create(
+ async def test_method_generate_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ image = await async_client.images.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
background="auto",
model="openai-gpt-image-1",
@@ -156,47 +156,47 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
stream=False,
user="user-1234",
)
- assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_raw_response_create_overload_1(self, async_client: AsyncGradient) -> None:
- response = await async_client.images.generations.with_raw_response.create(
+ async def test_raw_response_generate_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.images.with_raw_response.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- generation = await response.parse()
- assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+ image = await response.parse()
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None:
- async with async_client.images.generations.with_streaming_response.create(
+ async def test_streaming_response_generate_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.images.with_streaming_response.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- generation = await response.parse()
- assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+ image = await response.parse()
+ assert_matches_type(ImageGenerateResponse, image, path=["response"])
assert cast(Any, response.is_closed) is True
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None:
- generation_stream = await async_client.images.generations.create(
+ async def test_method_generate_overload_2(self, async_client: AsyncGradient) -> None:
+ image_stream = await async_client.images.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
stream=True,
)
- await generation_stream.response.aclose()
+ await image_stream.response.aclose()
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
- generation_stream = await async_client.images.generations.create(
+ async def test_method_generate_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ image_stream = await async_client.images.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
stream=True,
background="auto",
@@ -210,12 +210,12 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
size="auto",
user="user-1234",
)
- await generation_stream.response.aclose()
+ await image_stream.response.aclose()
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None:
- response = await async_client.images.generations.with_raw_response.create(
+ async def test_raw_response_generate_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.images.with_raw_response.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
stream=True,
)
@@ -226,8 +226,8 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncGradient)
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
- async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None:
- async with async_client.images.generations.with_streaming_response.create(
+ async def test_streaming_response_generate_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.images.with_streaming_response.generate(
prompt="A cute baby sea otter floating on its back in calm blue water",
stream=True,
) as response:
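
The wrapper classes renamed at the bottom of `src/gradient/resources/images.py` (`ImagesResourceWithRawResponse`, `ImagesResourceWithStreamingResponse`, and their async counterparts) expose the same helpers under the new method name, as the rewritten tests above exercise. A hedged sketch of the raw-response path, mirroring `test_raw_response_generate_overload_1`:

```python
from gradient import Gradient

client = Gradient(model_access_key="...")  # placeholder credential

response = client.images.with_raw_response.generate(
    prompt="A cute baby sea otter floating on its back in calm blue water",
)
print(response.http_request.headers.get("X-Stainless-Lang"))  # expected: "python"
image = response.parse()  # parsed ImageGenerateResponse
```
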