diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a1304a1..e0dc500 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.0.2" + ".": "3.1.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index e30c19b..13507cd 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 173 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-621c3ebf5011c5ca508f78fccbea17de4ca6b35bfe99578c1ae2265021578d6f.yml -openapi_spec_hash: e29d14e3e4679fcf22b3e760e49931b1 -config_hash: 3d425c415b7f7ab581418b43eb521cb3 +configured_endpoints: 175 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-40c154e2fdc4fef9ca1cf8329c29b7102bf324acfc9589571d6f3452d1ca579c.yml +openapi_spec_hash: 83a3d092965fde776b29b61f785459f9 +config_hash: dd3a0f16fb9e072bb63c570b14beccd2 diff --git a/CHANGELOG.md b/CHANGELOG.md index 099062d..0fed586 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 3.1.0 (2025-10-03) + +Full Changelog: [v3.0.2...v3.1.0](https://github.com/digitalocean/gradient-python/compare/v3.0.2...v3.1.0) + +### Features + +* **api:** update via SDK Studio ([20f2512](https://github.com/digitalocean/gradient-python/commit/20f251223fbe35fbe170b07be41fa6fd2656eed7)) +* **api:** update via SDK Studio ([09bf61b](https://github.com/digitalocean/gradient-python/commit/09bf61b5c24b1299a84ea6e8d4df3b88118d9fc3)) +* **api:** update via SDK Studio ([76d29b6](https://github.com/digitalocean/gradient-python/commit/76d29b61ce039f3f270715135ab4d0f444a52b3c)) +* **api:** update via SDK Studio ([fa68fb4](https://github.com/digitalocean/gradient-python/commit/fa68fb43e3e175b3dacd62d459b5d8c38b07e367)) +* **api:** update via SDK Studio ([e23ac14](https://github.com/digitalocean/gradient-python/commit/e23ac14538e17e8d33c33335285389cf13eefe04)) +* **api:** update via SDK Studio ([a5f6aa6](https://github.com/digitalocean/gradient-python/commit/a5f6aa656021a9aaa6a2e82dfa251f87f0096de0)) +* **api:** update via SDK Studio ([b900d76](https://github.com/digitalocean/gradient-python/commit/b900d769ba4a290523f17d2d69de850366c961b6)) + + +### Chores + +* **client:** support model_access_key in image generations ([4b81c5c](https://github.com/digitalocean/gradient-python/commit/4b81c5cf4998707ca2b4eff25845f687e2002602)) +* **client:** support model_access_key in image generations for real ([c202e81](https://github.com/digitalocean/gradient-python/commit/c202e81d81732217a839a0c7c5e56178252362a1)) +* fix bash quoting ([d92383d](https://github.com/digitalocean/gradient-python/commit/d92383da134a32cb0ae6f5a1c3044ec4947deacc)) +* quote bash variables ([6673263](https://github.com/digitalocean/gradient-python/commit/6673263dbdee2ae77eabd2f6d88cf61921f9e63c)) +* remove preview warning ([e4cf6a8](https://github.com/digitalocean/gradient-python/commit/e4cf6a8b5b37acf483be7301aa0a661a5db43a05)) +* update actions versions ([7056460](https://github.com/digitalocean/gradient-python/commit/7056460cef8093329da4ed24f2e7bd286213e90d)) + ## 3.0.2 (2025-09-24) Full Changelog: [v3.0.1...v3.0.2](https://github.com/digitalocean/gradient-python/compare/v3.0.1...v3.0.2) diff --git a/api.md b/api.md index 7299b3c..6dd6c18 100644 --- a/api.md +++ b/api.md @@ -18,6 +18,9 @@ from gradient.types import ( GarbageCollection, GPUInfo, Image, + ImageGenCompletedEvent, + ImageGenPartialImageEvent, + ImageGenStreamEvent, Kernel, 
MetaProperties, NetworkV4, @@ -383,6 +386,20 @@ Methods: - client.chat.completions.create(\*\*params) -> CompletionCreateResponse +# Images + +## Generations + +Types: + +```python +from gradient.types.images import GenerationCreateResponse +``` + +Methods: + +- client.images.generations.create(\*\*params) -> GenerationCreateResponse + # GPUDroplets Types: @@ -757,6 +774,7 @@ Types: ```python from gradient.types.gpu_droplets.account import ( + SSHKeys, KeyCreateResponse, KeyRetrieveResponse, KeyUpdateResponse, @@ -834,6 +852,7 @@ from gradient.types.knowledge_bases import ( DataSourceCreateResponse, DataSourceListResponse, DataSourceDeleteResponse, + DataSourceCreatePresignedURLsResponse, ) ``` @@ -842,6 +861,7 @@ Methods: - client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse - client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse - client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +- client.knowledge_bases.data_sources.create_presigned_urls(\*\*params) -> DataSourceCreatePresignedURLsResponse ## IndexingJobs diff --git a/pyproject.toml b/pyproject.toml index 6274143..e2ea2e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gradient" -version = "3.0.2" +version = "3.1.0" description = "The official Python library for the Gradient API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradient/_client.py b/src/gradient/_client.py index 338d343..46d9b28 100644 --- a/src/gradient/_client.py +++ b/src/gradient/_client.py @@ -35,6 +35,7 @@ from .resources import ( chat, agents, + images, models, regions, databases, @@ -49,6 +50,7 @@ AsyncGPUDropletsResource, ) from .resources.agents.agents import AgentsResource, AsyncAgentsResource + from .resources.images.images import ImagesResource, AsyncImagesResource from .resources.models.models import ModelsResource, AsyncModelsResource from .resources.databases.databases import DatabasesResource, AsyncDatabasesResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource @@ -182,6 +184,12 @@ def chat(self) -> ChatResource: return ChatResource(self) + @cached_property + def images(self) -> ImagesResource: + from .resources.images import ImagesResource + + return ImagesResource(self) + @cached_property def gpu_droplets(self) -> GPUDropletsResource: from .resources.gpu_droplets import GPUDropletsResource @@ -471,6 +479,12 @@ def chat(self) -> AsyncChatResource: return AsyncChatResource(self) + @cached_property + def images(self) -> AsyncImagesResource: + from .resources.images import AsyncImagesResource + + return AsyncImagesResource(self) + @cached_property def gpu_droplets(self) -> AsyncGPUDropletsResource: from .resources.gpu_droplets import AsyncGPUDropletsResource @@ -665,6 +679,12 @@ def chat(self) -> chat.ChatResourceWithRawResponse: return ChatResourceWithRawResponse(self._client.chat) + @cached_property + def images(self) -> images.ImagesResourceWithRawResponse: + from .resources.images import ImagesResourceWithRawResponse + + return ImagesResourceWithRawResponse(self._client.images) + @cached_property def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithRawResponse: from .resources.gpu_droplets import GPUDropletsResourceWithRawResponse @@ -720,6 +740,12 @@ def chat(self) -> chat.AsyncChatResourceWithRawResponse: return AsyncChatResourceWithRawResponse(self._client.chat) + @cached_property + def 
images(self) -> images.AsyncImagesResourceWithRawResponse: + from .resources.images import AsyncImagesResourceWithRawResponse + + return AsyncImagesResourceWithRawResponse(self._client.images) + @cached_property def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithRawResponse: from .resources.gpu_droplets import AsyncGPUDropletsResourceWithRawResponse @@ -779,6 +805,12 @@ def chat(self) -> chat.ChatResourceWithStreamingResponse: return ChatResourceWithStreamingResponse(self._client.chat) + @cached_property + def images(self) -> images.ImagesResourceWithStreamingResponse: + from .resources.images import ImagesResourceWithStreamingResponse + + return ImagesResourceWithStreamingResponse(self._client.images) + @cached_property def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithStreamingResponse: from .resources.gpu_droplets import GPUDropletsResourceWithStreamingResponse @@ -838,6 +870,12 @@ def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: return AsyncChatResourceWithStreamingResponse(self._client.chat) + @cached_property + def images(self) -> images.AsyncImagesResourceWithStreamingResponse: + from .resources.images import AsyncImagesResourceWithStreamingResponse + + return AsyncImagesResourceWithStreamingResponse(self._client.images) + @cached_property def gpu_droplets( self, diff --git a/src/gradient/_version.py b/src/gradient/_version.py index bd32dfe..69cb2fc 100644 --- a/src/gradient/_version.py +++ b/src/gradient/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradient" -__version__ = "3.0.2" # x-release-please-version +__version__ = "3.1.0" # x-release-please-version diff --git a/src/gradient/resources/__init__.py b/src/gradient/resources/__init__.py index d519856..fdc7d34 100644 --- a/src/gradient/resources/__init__.py +++ b/src/gradient/resources/__init__.py @@ -16,6 +16,14 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) +from .images import ( + ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) from .models import ( ModelsResource, AsyncModelsResource, @@ -78,6 +86,12 @@ "AsyncChatResourceWithRawResponse", "ChatResourceWithStreamingResponse", "AsyncChatResourceWithStreamingResponse", + "ImagesResource", + "AsyncImagesResource", + "ImagesResourceWithRawResponse", + "AsyncImagesResourceWithRawResponse", + "ImagesResourceWithStreamingResponse", + "AsyncImagesResourceWithStreamingResponse", "GPUDropletsResource", "AsyncGPUDropletsResource", "GPUDropletsResourceWithRawResponse", diff --git a/src/gradient/resources/images/__init__.py b/src/gradient/resources/images/__init__.py new file mode 100644 index 0000000..cf187f1 --- /dev/null +++ b/src/gradient/resources/images/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .images import ( + ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) +from .generations import ( + GenerationsResource, + AsyncGenerationsResource, + GenerationsResourceWithRawResponse, + AsyncGenerationsResourceWithRawResponse, + GenerationsResourceWithStreamingResponse, + AsyncGenerationsResourceWithStreamingResponse, +) + +__all__ = [ + "GenerationsResource", + "AsyncGenerationsResource", + "GenerationsResourceWithRawResponse", + "AsyncGenerationsResourceWithRawResponse", + "GenerationsResourceWithStreamingResponse", + "AsyncGenerationsResourceWithStreamingResponse", + "ImagesResource", + "AsyncImagesResource", + "ImagesResourceWithRawResponse", + "AsyncImagesResourceWithRawResponse", + "ImagesResourceWithStreamingResponse", + "AsyncImagesResourceWithStreamingResponse", +] diff --git a/src/gradient/resources/images/generations.py b/src/gradient/resources/images/generations.py new file mode 100644 index 0000000..8a5cfdb --- /dev/null +++ b/src/gradient/resources/images/generations.py @@ -0,0 +1,706 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, overload + +import httpx + +from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given +from ..._utils import required_args, maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._streaming import Stream, AsyncStream +from ..._base_client import make_request_options +from ...types.images import generation_create_params +from ...types.shared.image_gen_stream_event import ImageGenStreamEvent +from ...types.images.generation_create_response import GenerationCreateResponse + +__all__ = ["GenerationsResource", "AsyncGenerationsResource"] + + +class GenerationsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> GenerationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return GenerationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> GenerationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return GenerationsResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def create( + self, + *, + prompt: str, + stream: Literal[True], + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> Stream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def create( + self, + *, + prompt: str, + stream: bool, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @required_args(["prompt"], ["prompt", "stream"]) + def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | Stream[ImageGenStreamEvent]: + if not self._client.model_access_key: + raise TypeError( + "Could not resolve authentication method. Expected model_access_key to be set for image generations." + ) + headers = extra_headers or {} + headers = { + "Authorization": f"Bearer {self._client.model_access_key}", + **headers, + } + + return self._post( + "/images/generations" + if self._client._base_url_overridden + else f"{self._client.inference_endpoint}/v1/images/generations", + body=maybe_transform( + { + "prompt": prompt, + "background": background, + "model": model, + "moderation": moderation, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "size": size, + "stream": stream, + "user": user, + }, + generation_create_params.GenerationCreateParamsStreaming + if stream + else generation_create_params.GenerationCreateParamsNonStreaming, + ), + options=make_request_options( + extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GenerationCreateResponse, + stream=stream or False, + stream_cls=Stream[ImageGenStreamEvent], + ) + + +class AsyncGenerationsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncGenerationsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return AsyncGenerationsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncGenerationsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+ + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return AsyncGenerationsResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + async def create( + self, + *, + prompt: str, + stream: Literal[True], + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> AsyncStream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + async def create( + self, + *, + prompt: str, + stream: bool, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]: + """ + Creates a high-quality image from a text prompt using GPT-IMAGE-1, the latest + image generation model with automatic prompt optimization and enhanced visual + capabilities. + + Args: + prompt: A text description of the desired image(s). GPT-IMAGE-1 supports up to 32,000 + characters and provides automatic prompt optimization for best results. + + stream: If set to true, partial image data will be streamed as the image is being + generated. When streaming, the response will be sent as server-sent events with + partial image chunks. When stream is true, partial_images must be greater + than 0. + + background: + The background setting for the image generation. GPT-IMAGE-1 supports: + transparent, opaque, auto. + + model: The model to use for image generation. GPT-IMAGE-1 is the latest model offering + the best quality with automatic optimization and enhanced capabilities. + + moderation: The moderation setting for the image generation. GPT-IMAGE-1 supports: low, + auto. + + n: The number of images to generate. GPT-IMAGE-1 only supports n=1. + + output_compression: The output compression for the image generation. GPT-IMAGE-1 supports: 0-100. + + output_format: The output format for the image generation. GPT-IMAGE-1 supports: png, webp, + jpeg. + + partial_images: The number of partial image chunks to return during streaming generation. This + parameter is optional with a default of 0. When stream=true, this must be + greater than 0 to receive progressive updates of the image as it's being + generated. Higher values provide more frequent updates but may increase response + overhead. + + quality: The quality of the image that will be generated. GPT-IMAGE-1 supports: auto + (automatically select best quality), high, medium, low. + + size: The size of the generated images. GPT-IMAGE-1 supports: auto (automatically + select best size), 1536x1024 (landscape), 1024x1536 (portrait). + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @required_args(["prompt"], ["prompt", "stream"]) + async def create( + self, + *, + prompt: str, + background: Optional[str] | Omit = omit, + model: str | Omit = omit, + moderation: Optional[str] | Omit = omit, + n: Optional[int] | Omit = omit, + output_compression: Optional[int] | Omit = omit, + output_format: Optional[str] | Omit = omit, + partial_images: Optional[int] | Omit = omit, + quality: Optional[str] | Omit = omit, + size: Optional[str] | Omit = omit, + stream: Optional[Literal[False]] | Literal[True] | Omit = omit, + user: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> GenerationCreateResponse | AsyncStream[ImageGenStreamEvent]: + if not self._client.model_access_key: + raise TypeError( + "Could not resolve authentication method. Expected model_access_key to be set for image generations." + ) + headers = extra_headers or {} + headers = { + "Authorization": f"Bearer {self._client.model_access_key}", + **headers, + } + + return await self._post( + "/images/generations" + if self._client._base_url_overridden + else f"{self._client.inference_endpoint}/v1/images/generations", + body=await async_maybe_transform( + { + "prompt": prompt, + "background": background, + "model": model, + "moderation": moderation, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "size": size, + "stream": stream, + "user": user, + }, + generation_create_params.GenerationCreateParamsStreaming + if stream + else generation_create_params.GenerationCreateParamsNonStreaming, + ), + options=make_request_options( + extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GenerationCreateResponse, + stream=stream or False, + stream_cls=AsyncStream[ImageGenStreamEvent], + ) + + +class GenerationsResourceWithRawResponse: + def __init__(self, generations: GenerationsResource) -> None: + self._generations = generations + + self.create = to_raw_response_wrapper( + generations.create, + ) + + +class AsyncGenerationsResourceWithRawResponse: + def __init__(self, generations: AsyncGenerationsResource) -> None: + self._generations = generations + + self.create = async_to_raw_response_wrapper( + generations.create, + ) + + +class GenerationsResourceWithStreamingResponse: + def __init__(self, generations: GenerationsResource) -> None: + self._generations = generations + + self.create = to_streamed_response_wrapper( + generations.create, + ) + + +class AsyncGenerationsResourceWithStreamingResponse: + def __init__(self, generations: AsyncGenerationsResource) -> None: + self._generations = generations + + self.create = async_to_streamed_response_wrapper( + generations.create, + ) diff --git a/src/gradient/resources/images/images.py b/src/gradient/resources/images/images.py new file mode 100644 index 0000000..37e7290 --- /dev/null +++ b/src/gradient/resources/images/images.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .generations import ( + GenerationsResource, + AsyncGenerationsResource, + GenerationsResourceWithRawResponse, + AsyncGenerationsResourceWithRawResponse, + GenerationsResourceWithStreamingResponse, + AsyncGenerationsResourceWithStreamingResponse, +) + +__all__ = ["ImagesResource", "AsyncImagesResource"] + + +class ImagesResource(SyncAPIResource): + @cached_property + def generations(self) -> GenerationsResource: + return GenerationsResource(self._client) + + @cached_property + def with_raw_response(self) -> ImagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return ImagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return ImagesResourceWithStreamingResponse(self) + + +class AsyncImagesResource(AsyncAPIResource): + @cached_property + def generations(self) -> AsyncGenerationsResource: + return AsyncGenerationsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers + """ + return AsyncImagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response + """ + return AsyncImagesResourceWithStreamingResponse(self) + + +class ImagesResourceWithRawResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images + + @cached_property + def generations(self) -> GenerationsResourceWithRawResponse: + return GenerationsResourceWithRawResponse(self._images.generations) + + +class AsyncImagesResourceWithRawResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images + + @cached_property + def generations(self) -> AsyncGenerationsResourceWithRawResponse: + return AsyncGenerationsResourceWithRawResponse(self._images.generations) + + +class ImagesResourceWithStreamingResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images + + @cached_property + def generations(self) -> GenerationsResourceWithStreamingResponse: + return GenerationsResourceWithStreamingResponse(self._images.generations) + + +class AsyncImagesResourceWithStreamingResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images + + @cached_property + def generations(self) -> AsyncGenerationsResourceWithStreamingResponse: + return AsyncGenerationsResourceWithStreamingResponse(self._images.generations) diff --git a/src/gradient/resources/knowledge_bases/data_sources.py b/src/gradient/resources/knowledge_bases/data_sources.py index 083ea45..a00d93f 100644 --- a/src/gradient/resources/knowledge_bases/data_sources.py +++ b/src/gradient/resources/knowledge_bases/data_sources.py @@ -2,6 +2,8 @@ from __future__ import annotations +from typing import Iterable + import httpx from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given @@ -18,6 +20,7 @@ from ...types.knowledge_bases import ( data_source_list_params, data_source_create_params, + data_source_create_presigned_urls_params, ) from ...types.knowledge_bases.aws_data_source_param import AwsDataSourceParam from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse @@ -25,6 +28,7 @@ from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse from ...types.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam from ...types.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam +from ...types.knowledge_bases.data_source_create_presigned_urls_response import DataSourceCreatePresignedURLsResponse __all__ = ["DataSourcesResource", "AsyncDataSourcesResource"] @@ -203,6 +207,45 @@ def delete( cast_to=DataSourceDeleteResponse, ) + def create_presigned_urls( + self, + *, + files: Iterable[data_source_create_presigned_urls_params.File] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> DataSourceCreatePresignedURLsResponse: + """ + To create presigned URLs for knowledge base data source file upload, send a POST + request to `/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls`. + + Args: + files: A list of files to generate presigned URLs for. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls", + body=maybe_transform( + {"files": files}, data_source_create_presigned_urls_params.DataSourceCreatePresignedURLsParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceCreatePresignedURLsResponse, + ) + class AsyncDataSourcesResource(AsyncAPIResource): @cached_property @@ -378,6 +421,45 @@ async def delete( cast_to=DataSourceDeleteResponse, ) + async def create_presigned_urls( + self, + *, + files: Iterable[data_source_create_presigned_urls_params.File] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> DataSourceCreatePresignedURLsResponse: + """ + To create presigned URLs for knowledge base data source file upload, send a POST + request to `/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls`. + + Args: + files: A list of files to generate presigned URLs for. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases/data_sources/file_upload_presigned_urls", + body=await async_maybe_transform( + {"files": files}, data_source_create_presigned_urls_params.DataSourceCreatePresignedURLsParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceCreatePresignedURLsResponse, + ) + class DataSourcesResourceWithRawResponse: def __init__(self, data_sources: DataSourcesResource) -> None: @@ -392,6 +474,9 @@ def __init__(self, data_sources: DataSourcesResource) -> None: self.delete = to_raw_response_wrapper( data_sources.delete, ) + self.create_presigned_urls = to_raw_response_wrapper( + data_sources.create_presigned_urls, + ) class AsyncDataSourcesResourceWithRawResponse: @@ -407,6 +492,9 @@ def __init__(self, data_sources: AsyncDataSourcesResource) -> None: self.delete = async_to_raw_response_wrapper( data_sources.delete, ) + self.create_presigned_urls = async_to_raw_response_wrapper( + data_sources.create_presigned_urls, + ) class DataSourcesResourceWithStreamingResponse: @@ -422,6 +510,9 @@ def __init__(self, data_sources: DataSourcesResource) -> None: self.delete = to_streamed_response_wrapper( data_sources.delete, ) + self.create_presigned_urls = to_streamed_response_wrapper( + data_sources.create_presigned_urls, + ) class 
AsyncDataSourcesResourceWithStreamingResponse: @@ -437,3 +528,6 @@ def __init__(self, data_sources: AsyncDataSourcesResource) -> None: self.delete = async_to_streamed_response_wrapper( data_sources.delete, ) + self.create_presigned_urls = async_to_streamed_response_wrapper( + data_sources.create_presigned_urls, + ) diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py index d28c4c1..ad63485 100644 --- a/src/gradient/types/__init__.py +++ b/src/gradient/types/__init__.py @@ -41,8 +41,11 @@ GarbageCollection as GarbageCollection, FirewallRuleTarget as FirewallRuleTarget, ChatCompletionChunk as ChatCompletionChunk, + ImageGenStreamEvent as ImageGenStreamEvent, SubscriptionTierBase as SubscriptionTierBase, + ImageGenCompletedEvent as ImageGenCompletedEvent, DropletNextBackupWindow as DropletNextBackupWindow, + ImageGenPartialImageEvent as ImageGenPartialImageEvent, ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, ) from .api_agent import APIAgent as APIAgent diff --git a/src/gradient/types/agents/chat/completion_create_params.py b/src/gradient/types/agents/chat/completion_create_params.py index d8cf7bc..0980132 100644 --- a/src/gradient/types/agents/chat/completion_create_params.py +++ b/src/gradient/types/agents/chat/completion_create_params.py @@ -11,9 +11,17 @@ "CompletionCreateParamsBase", "Message", "MessageChatCompletionRequestSystemMessage", + "MessageChatCompletionRequestSystemMessageContentArrayOfContentPart", + "MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestDeveloperMessage", + "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart", + "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestUserMessage", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPart", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestAssistantMessage", + "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart", + "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestAssistantMessageToolCall", "MessageChatCompletionRequestAssistantMessageToolCallFunction", "MessageChatCompletionRequestToolMessage", @@ -157,30 +165,82 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ +class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestSystemMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestSystemMessageContentArrayOfContentPart]]] """The contents of the system message.""" role: Required[Literal["system"]] """The role of the messages author, in this case `system`.""" +class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart: TypeAlias = Union[ + str, 
MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart]]] """The contents of the developer message.""" role: Required[Literal["developer"]] """The role of the messages author, in this case `developer`.""" +class MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestUserMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]] """The contents of the user message.""" role: Required[Literal["user"]] """The role of the messages author, in this case `user`.""" +class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False): arguments: Required[str] """ @@ -209,7 +269,7 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" - content: Union[str, SequenceNotStr[str], None] + content: Union[str, SequenceNotStr[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None] """The contents of the assistant message.""" tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] diff --git a/src/gradient/types/agents/chat/completion_create_response.py b/src/gradient/types/agents/chat/completion_create_response.py index 4c839de..f3dedb4 100644 --- a/src/gradient/types/agents/chat/completion_create_response.py +++ b/src/gradient/types/agents/chat/completion_create_response.py @@ -53,6 +53,9 @@ class ChoiceMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" + reasoning_content: Optional[str] = None + """The reasoning content generated by the model.""" + refusal: Optional[str] = None """The refusal message generated by the model.""" diff --git a/src/gradient/types/chat/completion_create_params.py b/src/gradient/types/chat/completion_create_params.py index 17f0024..7874d89 100644 --- a/src/gradient/types/chat/completion_create_params.py +++ b/src/gradient/types/chat/completion_create_params.py @@ -11,9 +11,17 @@ "CompletionCreateParamsBase", "Message", "MessageChatCompletionRequestSystemMessage", + "MessageChatCompletionRequestSystemMessageContentArrayOfContentPart", + "MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestDeveloperMessage", + "MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart", + 
"MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestUserMessage", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPart", + "MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestAssistantMessage", + "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart", + "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1", "MessageChatCompletionRequestAssistantMessageToolCall", "MessageChatCompletionRequestAssistantMessageToolCallFunction", "MessageChatCompletionRequestToolMessage", @@ -157,30 +165,82 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ +class MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestSystemMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestSystemMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestSystemMessageContentArrayOfContentPart]]] """The contents of the system message.""" role: Required[Literal["system"]] """The role of the messages author, in this case `system`.""" +class MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestDeveloperMessageContentArrayOfContentPart]]] """The contents of the developer message.""" role: Required[Literal["developer"]] """The role of the messages author, in this case `developer`.""" +class MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestUserMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestUserMessageContentArrayOfContentPartUnionMember1 +] + + class MessageChatCompletionRequestUserMessage(TypedDict, total=False): - content: Required[Union[str, SequenceNotStr[str]]] + content: Required[Union[str, SequenceNotStr[MessageChatCompletionRequestUserMessageContentArrayOfContentPart]]] """The contents of the user message.""" role: Required[Literal["user"]] """The role of the messages author, in this case `user`.""" +class MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1(TypedDict, total=False): + text: Required[str] + """The text content""" + + type: Required[Literal["text"]] + """The type of content part""" + + +MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[ + str, MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartUnionMember1 +] + + class 
MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False): arguments: Required[str] """ @@ -209,7 +269,7 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" - content: Union[str, SequenceNotStr[str], None] + content: Union[str, SequenceNotStr[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None] """The contents of the assistant message.""" tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] diff --git a/src/gradient/types/chat/completion_create_response.py b/src/gradient/types/chat/completion_create_response.py index 73a09cf..9e157ae 100644 --- a/src/gradient/types/chat/completion_create_response.py +++ b/src/gradient/types/chat/completion_create_response.py @@ -53,6 +53,9 @@ class ChoiceMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" + reasoning_content: Optional[str] = None + """The reasoning content generated by the model.""" + refusal: Optional[str] = None """The refusal message generated by the model.""" diff --git a/src/gradient/types/gpu_droplets/account/__init__.py b/src/gradient/types/gpu_droplets/account/__init__.py index 4cd6497..2d8a05a 100644 --- a/src/gradient/types/gpu_droplets/account/__init__.py +++ b/src/gradient/types/gpu_droplets/account/__init__.py @@ -2,6 +2,7 @@ from __future__ import annotations +from .ssh_keys import SSHKeys as SSHKeys from .key_list_params import KeyListParams as KeyListParams from .key_create_params import KeyCreateParams as KeyCreateParams from .key_list_response import KeyListResponse as KeyListResponse diff --git a/src/gradient/types/gpu_droplets/account/key_create_response.py b/src/gradient/types/gpu_droplets/account/key_create_response.py index 9fe566e..5ce6326 100644 --- a/src/gradient/types/gpu_droplets/account/key_create_response.py +++ b/src/gradient/types/gpu_droplets/account/key_create_response.py @@ -2,38 +2,11 @@ from typing import Optional +from .ssh_keys import SSHKeys from ...._models import BaseModel -__all__ = ["KeyCreateResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. 
- """ +__all__ = ["KeyCreateResponse"] class KeyCreateResponse(BaseModel): - ssh_key: Optional[SSHKey] = None + ssh_key: Optional[SSHKeys] = None diff --git a/src/gradient/types/gpu_droplets/account/key_list_response.py b/src/gradient/types/gpu_droplets/account/key_list_response.py index be4c721..1151043 100644 --- a/src/gradient/types/gpu_droplets/account/key_list_response.py +++ b/src/gradient/types/gpu_droplets/account/key_list_response.py @@ -2,39 +2,12 @@ from typing import List, Optional +from .ssh_keys import SSHKeys from ...._models import BaseModel from ...shared.page_links import PageLinks from ...shared.meta_properties import MetaProperties -__all__ = ["KeyListResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. - """ +__all__ = ["KeyListResponse"] class KeyListResponse(BaseModel): @@ -43,4 +16,4 @@ class KeyListResponse(BaseModel): links: Optional[PageLinks] = None - ssh_keys: Optional[List[SSHKey]] = None + ssh_keys: Optional[List[SSHKeys]] = None diff --git a/src/gradient/types/gpu_droplets/account/key_retrieve_response.py b/src/gradient/types/gpu_droplets/account/key_retrieve_response.py index 7cd3215..da6e94d 100644 --- a/src/gradient/types/gpu_droplets/account/key_retrieve_response.py +++ b/src/gradient/types/gpu_droplets/account/key_retrieve_response.py @@ -2,38 +2,11 @@ from typing import Optional +from .ssh_keys import SSHKeys from ...._models import BaseModel -__all__ = ["KeyRetrieveResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. 
- """ +__all__ = ["KeyRetrieveResponse"] class KeyRetrieveResponse(BaseModel): - ssh_key: Optional[SSHKey] = None + ssh_key: Optional[SSHKeys] = None diff --git a/src/gradient/types/gpu_droplets/account/key_update_response.py b/src/gradient/types/gpu_droplets/account/key_update_response.py index 2821e44..54b8142 100644 --- a/src/gradient/types/gpu_droplets/account/key_update_response.py +++ b/src/gradient/types/gpu_droplets/account/key_update_response.py @@ -2,38 +2,11 @@ from typing import Optional +from .ssh_keys import SSHKeys from ...._models import BaseModel -__all__ = ["KeyUpdateResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. - """ +__all__ = ["KeyUpdateResponse"] class KeyUpdateResponse(BaseModel): - ssh_key: Optional[SSHKey] = None + ssh_key: Optional[SSHKeys] = None diff --git a/src/gradient/types/gpu_droplets/account/ssh_keys.py b/src/gradient/types/gpu_droplets/account/ssh_keys.py new file mode 100644 index 0000000..8112c18 --- /dev/null +++ b/src/gradient/types/gpu_droplets/account/ssh_keys.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["SSHKeys"] + + +class SSHKeys(BaseModel): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: str + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ + + id: Optional[int] = None + """A unique identification number for this key. + + Can be used to embed a specific SSH key into a Droplet. + """ + + fingerprint: Optional[str] = None + """ + A unique identifier that differentiates this key from other keys using a format + that SSH recognizes. The fingerprint is created when the key is added to your + account. + """ diff --git a/src/gradient/types/images/__init__.py b/src/gradient/types/images/__init__.py new file mode 100644 index 0000000..29634ec --- /dev/null +++ b/src/gradient/types/images/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .generation_create_params import GenerationCreateParams as GenerationCreateParams +from .generation_create_response import GenerationCreateResponse as GenerationCreateResponse diff --git a/src/gradient/types/images/generation_create_params.py b/src/gradient/types/images/generation_create_params.py new file mode 100644 index 0000000..ec8b672 --- /dev/null +++ b/src/gradient/types/images/generation_create_params.py @@ -0,0 +1,100 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/src/gradient/types/images/__init__.py b/src/gradient/types/images/__init__.py
new file mode 100644
index 0000000..29634ec
--- /dev/null
+++ b/src/gradient/types/images/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .generation_create_params import GenerationCreateParams as GenerationCreateParams
+from .generation_create_response import GenerationCreateResponse as GenerationCreateResponse
diff --git a/src/gradient/types/images/generation_create_params.py b/src/gradient/types/images/generation_create_params.py
new file mode 100644
index 0000000..ec8b672
--- /dev/null
+++ b/src/gradient/types/images/generation_create_params.py
@@ -0,0 +1,100 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["GenerationCreateParamsBase", "GenerationCreateParamsNonStreaming", "GenerationCreateParamsStreaming"]
+
+
+class GenerationCreateParamsBase(TypedDict, total=False):
+    prompt: Required[str]
+    """A text description of the desired image(s).
+
+    GPT-IMAGE-1 supports up to 32,000 characters and provides automatic prompt
+    optimization for best results.
+    """
+
+    background: Optional[str]
+    """The background setting for the image generation.
+
+    GPT-IMAGE-1 supports: transparent, opaque, auto.
+    """
+
+    model: str
+    """The model to use for image generation.
+
+    GPT-IMAGE-1 is the latest model offering the best quality with automatic
+    optimization and enhanced capabilities.
+    """
+
+    moderation: Optional[str]
+    """The moderation setting for the image generation.
+
+    GPT-IMAGE-1 supports: low, auto.
+    """
+
+    n: Optional[int]
+    """The number of images to generate. GPT-IMAGE-1 only supports n=1."""
+
+    output_compression: Optional[int]
+    """The output compression for the image generation. GPT-IMAGE-1 supports: 0-100."""
+
+    output_format: Optional[str]
+    """The output format for the image generation.
+
+    GPT-IMAGE-1 supports: png, webp, jpeg.
+    """
+
+    partial_images: Optional[int]
+    """The number of partial image chunks to return during streaming generation.
+
+    This parameter is optional with a default of 0. When stream=true, this must be
+    greater than 0 to receive progressive updates of the image as it's being
+    generated. Higher values provide more frequent updates but may increase response
+    overhead.
+    """
+
+    quality: Optional[str]
+    """The quality of the image that will be generated.
+
+    GPT-IMAGE-1 supports: auto (automatically select best quality), high, medium,
+    low.
+    """
+
+    size: Optional[str]
+    """The size of the generated images.
+
+    GPT-IMAGE-1 supports: auto (automatically select best size), 1536x1024
+    (landscape), 1024x1536 (portrait).
+    """
+
+    user: Optional[str]
+    """
+    A unique identifier representing your end-user, which can help DigitalOcean to
+    monitor and detect abuse.
+    """
+
+
+class GenerationCreateParamsNonStreaming(GenerationCreateParamsBase, total=False):
+    stream: Optional[Literal[False]]
+    """
+    If set to true, partial image data will be streamed as the image is being
+    generated. When streaming, the response will be sent as server-sent events with
+    partial image chunks. When stream is true, partial_images must be greater
+    than 0.
+    """
+
+
+class GenerationCreateParamsStreaming(GenerationCreateParamsBase):
+    stream: Required[Literal[True]]
+    """
+    If set to true, partial image data will be streamed as the image is being
+    generated. When streaming, the response will be sent as server-sent events with
+    partial image chunks. When stream is true, partial_images must be greater
+    than 0.
+    """
+
+
+GenerationCreateParams = Union[GenerationCreateParamsNonStreaming, GenerationCreateParamsStreaming]
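With the params above, a non-streaming generation is a single `create` call; the `GenerationCreateResponse` model added in the next file carries base64-encoded image data plus token usage. A minimal sketch — the model slug is taken from the generated tests, and `Gradient()` reading credentials from the environment is an assumption:

```python
# Sketch only: assumes Gradient() reads credentials from the environment.
import base64

from gradient import Gradient

client = Gradient()

generation = client.images.generations.create(
    prompt="A cute baby sea otter floating on its back in calm blue water",
    model="openai-gpt-image-1",  # slug taken from the generated tests below
    size="auto",
    output_format="png",
)

# GPT-IMAGE-1 returns b64_json only, so decode before writing to disk.
with open("otter.png", "wb") as f:
    f.write(base64.b64decode(generation.data[0].b64_json))

if generation.usage is not None:
    print("tokens used:", generation.usage.total_tokens)
```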
diff --git a/src/gradient/types/images/generation_create_response.py b/src/gradient/types/images/generation_create_response.py
new file mode 100644
index 0000000..32757c0
--- /dev/null
+++ b/src/gradient/types/images/generation_create_response.py
@@ -0,0 +1,63 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["GenerationCreateResponse", "Data", "Usage", "UsageInputTokensDetails"]
+
+
+class Data(BaseModel):
+    b64_json: str
+    """The base64-encoded JSON of the generated image.
+
+    GPT-IMAGE-1 returns images in b64_json format only.
+    """
+
+    revised_prompt: Optional[str] = None
+    """The optimized prompt that was used to generate the image.
+
+    GPT-IMAGE-1 automatically optimizes prompts for best results.
+    """
+
+
+class UsageInputTokensDetails(BaseModel):
+    text_tokens: Optional[int] = None
+    """Number of text tokens in the input"""
+
+
+class Usage(BaseModel):
+    input_tokens: int
+    """Number of tokens in the input prompt"""
+
+    total_tokens: int
+    """Total number of tokens used (input + output)"""
+
+    input_tokens_details: Optional[UsageInputTokensDetails] = None
+    """Detailed breakdown of input tokens"""
+
+    output_tokens: Optional[int] = None
+    """Number of tokens in the generated output"""
+
+
+class GenerationCreateResponse(BaseModel):
+    created: int
+    """The Unix timestamp (in seconds) of when the images were created"""
+
+    data: List[Data]
+    """The list of generated images"""
+
+    background: Optional[str] = None
+    """The background setting used for the image generation"""
+
+    output_format: Optional[str] = None
+    """The output format of the generated image"""
+
+    quality: Optional[str] = None
+    """The quality setting used for the image generation"""
+
+    size: Optional[str] = None
+    """The size of the generated image"""
+
+    usage: Optional[Usage] = None
+    """Usage statistics for the image generation request"""
diff --git a/src/gradient/types/knowledge_bases/__init__.py b/src/gradient/types/knowledge_bases/__init__.py
index b23053f..cab865f 100644
--- a/src/gradient/types/knowledge_bases/__init__.py
+++ b/src/gradient/types/knowledge_bases/__init__.py
@@ -24,6 +24,12 @@
 from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam
 from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
 from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
+from .data_source_create_presigned_urls_params import (
+    DataSourceCreatePresignedURLsParams as DataSourceCreatePresignedURLsParams,
+)
+from .data_source_create_presigned_urls_response import (
+    DataSourceCreatePresignedURLsResponse as DataSourceCreatePresignedURLsResponse,
+)
 from .indexing_job_retrieve_data_sources_response import (
     IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
 )
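These imports expose the new presigned-URL flow: request upload URLs for local files, then upload each file yourself before attaching it as a data source. A hedged sketch using the params and response models defined in the next two files; the plain `httpx.put` upload (verb and headers) is an assumption about the presigned target, not something this diff specifies:

```python
# Sketch only: assumes Gradient() reads credentials from the environment and
# that the presigned URL accepts a plain HTTP PUT of the raw bytes.
import os

import httpx

from gradient import Gradient

client = Gradient()

path = "docs/report.pdf"  # hypothetical local file
resp = client.knowledge_bases.data_sources.create_presigned_urls(
    files=[
        {
            "file_name": os.path.basename(path),
            "file_size": str(os.path.getsize(path)),  # file_size is a string, per the params
        }
    ],
)

for upload in resp.uploads or []:
    if upload.presigned_url:
        with open(path, "rb") as f:
            httpx.put(upload.presigned_url, content=f.read()).raise_for_status()
        # object_key identifies the stored file for later data-source creation.
        print("uploaded as", upload.object_key)
```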
diff --git a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
new file mode 100644
index 0000000..253cbce
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import TypedDict
+
+__all__ = ["DataSourceCreatePresignedURLsParams", "File"]
+
+
+class DataSourceCreatePresignedURLsParams(TypedDict, total=False):
+    files: Iterable[File]
+    """A list of files to generate presigned URLs for."""
+
+
+class File(TypedDict, total=False):
+    file_name: str
+    """Local filename"""
+
+    file_size: str
+    """The size of the file in bytes."""
diff --git a/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py
new file mode 100644
index 0000000..c3d172d
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/data_source_create_presigned_urls_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ..._models import BaseModel
+
+__all__ = ["DataSourceCreatePresignedURLsResponse", "Upload"]
+
+
+class Upload(BaseModel):
+    expires_at: Optional[datetime] = None
+    """The time the url expires at."""
+
+    object_key: Optional[str] = None
+    """The unique object key to store the file as."""
+
+    original_file_name: Optional[str] = None
+    """The original file name."""
+
+    presigned_url: Optional[str] = None
+    """The actual presigned URL the client can use to upload the file directly."""
+
+
+class DataSourceCreatePresignedURLsResponse(BaseModel):
+    request_id: Optional[str] = None
+    """The ID generated for the request for Presigned URLs."""
+
+    uploads: Optional[List[Upload]] = None
+    """A list of generated presigned URLs and object keys, one per file."""
diff --git a/src/gradient/types/shared/__init__.py b/src/gradient/types/shared/__init__.py
index 6d90845..4fb2986 100644
--- a/src/gradient/types/shared/__init__.py
+++ b/src/gradient/types/shared/__init__.py
@@ -24,6 +24,9 @@
 from .garbage_collection import GarbageCollection as GarbageCollection
 from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget
 from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .image_gen_stream_event import ImageGenStreamEvent as ImageGenStreamEvent
 from .subscription_tier_base import SubscriptionTierBase as SubscriptionTierBase
+from .image_gen_completed_event import ImageGenCompletedEvent as ImageGenCompletedEvent
 from .droplet_next_backup_window import DropletNextBackupWindow as DropletNextBackupWindow
 from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
+from .image_gen_partial_image_event import ImageGenPartialImageEvent as ImageGenPartialImageEvent
diff --git a/src/gradient/types/shared/chat_completion_chunk.py b/src/gradient/types/shared/chat_completion_chunk.py
index 4dd587f..e30e060 100644
--- a/src/gradient/types/shared/chat_completion_chunk.py
+++ b/src/gradient/types/shared/chat_completion_chunk.py
@@ -47,6 +47,9 @@ class ChoiceDelta(BaseModel):
     content: Optional[str] = None
     """The contents of the chunk message."""
 
+    reasoning_content: Optional[str] = None
+    """The reasoning content generated by the model."""
+
     refusal: Optional[str] = None
     """The refusal message generated by the model."""
 
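`reasoning_content` now also arrives on streamed deltas, so reasoning tokens can be rendered separately from the final answer. A minimal consumption sketch — the model slug is a placeholder, and environment-based credentials are assumed:

```python
# Sketch only: "example-model-slug" is a placeholder, not a value from this diff.
from gradient import Gradient

client = Gradient()  # assumed to read credentials from the environment

stream = client.chat.completions.create(
    model="example-model-slug",  # placeholder model id
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
    stream=True,
)

for chunk in stream:
    if not chunk.choices:
        continue
    delta = chunk.choices[0].delta
    # reasoning_content (new in this diff) streams separately from content.
    if delta.reasoning_content:
        print(delta.reasoning_content, end="", flush=True)
    if delta.content:
        print(delta.content, end="", flush=True)
```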
diff --git a/src/gradient/types/shared/image_gen_completed_event.py b/src/gradient/types/shared/image_gen_completed_event.py
new file mode 100644
index 0000000..cbb282e
--- /dev/null
+++ b/src/gradient/types/shared/image_gen_completed_event.py
@@ -0,0 +1,55 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ImageGenCompletedEvent", "Usage", "UsageInputTokensDetails"]
+
+
+class UsageInputTokensDetails(BaseModel):
+    image_tokens: int
+    """The number of image tokens in the input prompt."""
+
+    text_tokens: int
+    """The number of text tokens in the input prompt."""
+
+
+class Usage(BaseModel):
+    input_tokens: int
+    """The number of tokens (images and text) in the input prompt."""
+
+    input_tokens_details: UsageInputTokensDetails
+    """The input tokens detailed information for the image generation."""
+
+    output_tokens: int
+    """The number of image tokens in the output image."""
+
+    total_tokens: int
+    """The total number of tokens (images and text) used for the image generation."""
+
+
+class ImageGenCompletedEvent(BaseModel):
+    b64_json: str
+    """Base64-encoded image data, suitable for rendering as an image."""
+
+    background: Literal["transparent", "opaque", "auto"]
+    """The background setting for the generated image."""
+
+    created_at: int
+    """The Unix timestamp when the event was created."""
+
+    output_format: Literal["png", "webp", "jpeg"]
+    """The output format for the generated image."""
+
+    quality: Literal["low", "medium", "high", "auto"]
+    """The quality setting for the generated image."""
+
+    size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+    """The size of the generated image."""
+
+    type: Literal["image_generation.completed"]
+    """The type of the event. Always `image_generation.completed`."""
+
+    usage: Usage
+    """For `gpt-image-1` only, the token usage information for the image generation."""
diff --git a/src/gradient/types/shared/image_gen_partial_image_event.py b/src/gradient/types/shared/image_gen_partial_image_event.py
new file mode 100644
index 0000000..4cc704b
--- /dev/null
+++ b/src/gradient/types/shared/image_gen_partial_image_event.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ImageGenPartialImageEvent"]
+
+
+class ImageGenPartialImageEvent(BaseModel):
+    b64_json: str
+    """Base64-encoded partial image data, suitable for rendering as an image."""
+
+    background: Literal["transparent", "opaque", "auto"]
+    """The background setting for the requested image."""
+
+    created_at: int
+    """The Unix timestamp when the event was created."""
+
+    output_format: Literal["png", "webp", "jpeg"]
+    """The output format for the requested image."""
+
+    partial_image_index: int
+    """0-based index for the partial image (streaming)."""
+
+    quality: Literal["low", "medium", "high", "auto"]
+    """The quality setting for the requested image."""
+
+    size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"]
+    """The size of the requested image."""
+
+    type: Literal["image_generation.partial_image"]
+    """The type of the event. Always `image_generation.partial_image`."""
diff --git a/src/gradient/types/shared/image_gen_stream_event.py b/src/gradient/types/shared/image_gen_stream_event.py
new file mode 100644
index 0000000..30e9571
--- /dev/null
+++ b/src/gradient/types/shared/image_gen_stream_event.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union
+from typing_extensions import Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .image_gen_completed_event import ImageGenCompletedEvent
+from .image_gen_partial_image_event import ImageGenPartialImageEvent
+
+__all__ = ["ImageGenStreamEvent"]
+
+ImageGenStreamEvent: TypeAlias = Annotated[
+    Union[ImageGenPartialImageEvent, ImageGenCompletedEvent], PropertyInfo(discriminator="type")
+]
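The two event models are discriminated on `type` via the `ImageGenStreamEvent` union above, so a streaming generation can be handled with a simple dispatch. A sketch, assuming the streaming overload of `client.images.generations.create` yields these typed events (as the union and the tests below suggest):

```python
# Sketch only: assumes Gradient() reads credentials from the environment.
import base64

from gradient import Gradient

client = Gradient()

stream = client.images.generations.create(
    prompt="A cute baby sea otter floating on its back in calm blue water",
    model="openai-gpt-image-1",
    stream=True,
    partial_images=2,  # must be > 0 when stream=True
)

for event in stream:
    if event.type == "image_generation.partial_image":
        # Progressive previews arrive first, indexed from 0.
        print(f"partial image {event.partial_image_index} received")
    elif event.type == "image_generation.completed":
        # The final event carries the full image and token usage.
        with open(f"otter.{event.output_format}", "wb") as f:
            f.write(base64.b64decode(event.b64_json))
```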
diff --git a/src/gradient/types/shared/size.py b/src/gradient/types/shared/size.py
index 42b0b41..73abb7d 100644
--- a/src/gradient/types/shared/size.py
+++ b/src/gradient/types/shared/size.py
@@ -50,7 +50,7 @@ class Size(BaseModel):
     regions: List[str]
     """
     An array containing the region slugs where this size is available for Droplet
-    creates. regions:read is required to view.
+    creates.
     """
 
     slug: str
diff --git a/tests/api_resources/images/__init__.py b/tests/api_resources/images/__init__.py
new file mode 100644
index 0000000..fd8019a
--- /dev/null
+++ b/tests/api_resources/images/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/images/test_generations.py b/tests/api_resources/images/test_generations.py
new file mode 100644
index 0000000..c9c6756
--- /dev/null
+++ b/tests/api_resources/images/test_generations.py
@@ -0,0 +1,240 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.images import GenerationCreateResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestGenerations:
+    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_method_create_overload_1(self, client: Gradient) -> None:
+        generation = client.images.generations.create(
+            prompt="A cute baby sea otter floating on its back in calm blue water",
+        )
+        assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_method_create_with_all_params_overload_1(self, client: Gradient) -> None:
+        generation = client.images.generations.create(
+            prompt="A cute baby sea otter floating on its back in calm blue water",
+            background="auto",
+            model="openai-gpt-image-1",
+            moderation="auto",
+            n=1,
+            output_compression=100,
+            output_format="png",
+            partial_images=1,
+            quality="auto",
+            size="auto",
+            stream=False,
+            user="user-1234",
+        )
+        assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_raw_response_create_overload_1(self, client: Gradient) -> None:
+        response = client.images.generations.with_raw_response.create(
+            prompt="A cute baby sea otter floating on its back in calm blue water",
+        )
+
+        assert response.is_closed is True
+        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+        generation = response.parse()
+        assert_matches_type(GenerationCreateResponse, generation, path=["response"])
+
+    @pytest.mark.skip(reason="Prism tests are disabled")
+    @parametrize
+    def test_streaming_response_create_overload_1(self, client: Gradient) -> None:
+        with 
client.images.generations.with_streaming_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + generation = response.parse() + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_overload_2(self, client: Gradient) -> None: + generation_stream = client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) + generation_stream.response.close() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_with_all_params_overload_2(self, client: Gradient) -> None: + generation_stream = client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + background="auto", + model="openai-gpt-image-1", + moderation="auto", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="auto", + size="auto", + user="user-1234", + ) + generation_stream.response.close() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_overload_2(self, client: Gradient) -> None: + response = client.images.generations.with_raw_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = response.parse() + stream.close() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_overload_2(self, client: Gradient) -> None: + with client.images.generations.with_streaming_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = response.parse() + stream.close() + + assert cast(Any, response.is_closed) is True + + +class TestAsyncGenerations: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradient) -> None: + generation = await async_client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + ) + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradient) -> None: + generation = await async_client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + background="auto", + model="openai-gpt-image-1", + moderation="auto", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="auto", + size="auto", + stream=False, + user="user-1234", + ) + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_1(self, 
async_client: AsyncGradient) -> None: + response = await async_client.images.generations.with_raw_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + generation = await response.parse() + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradient) -> None: + async with async_client.images.generations.with_streaming_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + generation = await response.parse() + assert_matches_type(GenerationCreateResponse, generation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradient) -> None: + generation_stream = await async_client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) + await generation_stream.response.aclose() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradient) -> None: + generation_stream = await async_client.images.generations.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + background="auto", + model="openai-gpt-image-1", + moderation="auto", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="auto", + size="auto", + user="user-1234", + ) + await generation_stream.response.aclose() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradient) -> None: + response = await async_client.images.generations.with_raw_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = await response.parse() + await stream.close() + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradient) -> None: + async with async_client.images.generations.with_streaming_response.create( + prompt="A cute baby sea otter floating on its back in calm blue water", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = await response.parse() + await stream.close() + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index bd7158d..4214f88 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -13,6 +13,7 @@ DataSourceListResponse, DataSourceCreateResponse, DataSourceDeleteResponse, + DataSourceCreatePresignedURLsResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -195,6 +196,47 @@ def 
test_path_params_delete(self, client: Gradient) -> None: knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_presigned_urls(self, client: Gradient) -> None: + data_source = client.knowledge_bases.data_sources.create_presigned_urls() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_method_create_presigned_urls_with_all_params(self, client: Gradient) -> None: + data_source = client.knowledge_bases.data_sources.create_presigned_urls( + files=[ + { + "file_name": "example name", + "file_size": "file_size", + } + ], + ) + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_raw_response_create_presigned_urls(self, client: Gradient) -> None: + response = client.knowledge_bases.data_sources.with_raw_response.create_presigned_urls() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = response.parse() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + def test_streaming_response_create_presigned_urls(self, client: Gradient) -> None: + with client.knowledge_bases.data_sources.with_streaming_response.create_presigned_urls() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = response.parse() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + class TestAsyncDataSources: parametrize = pytest.mark.parametrize( @@ -374,3 +416,46 @@ async def test_path_params_delete(self, async_client: AsyncGradient) -> None: data_source_uuid="", knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_presigned_urls(self, async_client: AsyncGradient) -> None: + data_source = await async_client.knowledge_bases.data_sources.create_presigned_urls() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_method_create_presigned_urls_with_all_params(self, async_client: AsyncGradient) -> None: + data_source = await async_client.knowledge_bases.data_sources.create_presigned_urls( + files=[ + { + "file_name": "example name", + "file_size": "file_size", + } + ], + ) + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def test_raw_response_create_presigned_urls(self, async_client: AsyncGradient) -> None: + response = await async_client.knowledge_bases.data_sources.with_raw_response.create_presigned_urls() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = await response.parse() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + @pytest.mark.skip(reason="Prism tests are disabled") + @parametrize + async def 
test_streaming_response_create_presigned_urls(self, async_client: AsyncGradient) -> None: + async with ( + async_client.knowledge_bases.data_sources.with_streaming_response.create_presigned_urls() + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = await response.parse() + assert_matches_type(DataSourceCreatePresignedURLsResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True
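The async tests mirror the sync surface one-for-one. A minimal end-to-end sketch with `AsyncGradient` — using the client as an async context manager is an assumption based on typical Stainless-generated clients, not something this diff shows:

```python
# Sketch only: credentials from the environment and "async with" support are assumed.
import asyncio

from gradient import AsyncGradient


async def main() -> None:
    async with AsyncGradient() as client:
        stream = await client.images.generations.create(
            prompt="A cute baby sea otter floating on its back in calm blue water",
            stream=True,
            partial_images=1,  # must be > 0 when stream=True
        )
        # The async stream is consumed with "async for", as in the tests above.
        async for event in stream:
            print(event.type)


asyncio.run(main())
```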