From 7d7d879480a1d85ac8329cb98fa8da8afd8fee12 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 29 Jul 2025 19:22:37 +0000
Subject: [PATCH 01/10] feat(api): update via SDK Studio
---
.stats.yml | 6 +-
api.md | 13 +-
src/do_gradientai/resources/models/models.py | 156 -----------------
src/do_gradientai/types/__init__.py | 7 +-
.../types/model_list_response.py | 28 ---
.../types/model_retrieve_response.py | 21 ---
tests/api_resources/test_models.py | 164 ------------------
7 files changed, 5 insertions(+), 390 deletions(-)
delete mode 100644 src/do_gradientai/types/model_list_response.py
delete mode 100644 src/do_gradientai/types/model_retrieve_response.py
delete mode 100644 tests/api_resources/test_models.py
diff --git a/.stats.yml b/.stats.yml
index 718d3432..b459a4a0 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 168
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml
-openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d
+configured_endpoints: 166
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml
+openapi_spec_hash: d7d811c13cc79f15d82fe680cf425859
config_hash: 5cf9c7359c13307780aa25d0203b0b35
diff --git a/api.md b/api.md
index 5d6e5491..4460c4db 100644
--- a/api.md
+++ b/api.md
@@ -800,20 +800,9 @@ Methods:
Types:
```python
-from do_gradientai.types import (
- APIAgreement,
- APIModel,
- APIModelVersion,
- ModelRetrieveResponse,
- ModelListResponse,
-)
+from do_gradientai.types import APIAgreement, APIModel, APIModelVersion
```
-Methods:
-
-- client.models.retrieve(model) -> ModelRetrieveResponse
-- client.models.list() -> ModelListResponse
-
## Providers
### Anthropic
diff --git a/src/do_gradientai/resources/models/models.py b/src/do_gradientai/resources/models/models.py
index 41f2eabd..d98f59cf 100644
--- a/src/do_gradientai/resources/models/models.py
+++ b/src/do_gradientai/resources/models/models.py
@@ -2,18 +2,8 @@
from __future__ import annotations
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
from .providers.providers import (
ProvidersResource,
AsyncProvidersResource,
@@ -22,8 +12,6 @@
ProvidersResourceWithStreamingResponse,
AsyncProvidersResourceWithStreamingResponse,
)
-from ...types.model_list_response import ModelListResponse
-from ...types.model_retrieve_response import ModelRetrieveResponse
__all__ = ["ModelsResource", "AsyncModelsResource"]
@@ -52,64 +40,6 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
"""
return ModelsResourceWithStreamingResponse(self)
- def retrieve(
- self,
- model: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelRetrieveResponse:
- """
- Retrieves a model instance, providing basic information about the model such as
- the owner and permissioning.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not model:
- raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
- return self._get(
- f"/models/{model}"
- if self._client._base_url_overridden
- else f"https://inference.do-ai.run/v1/models/{model}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelRetrieveResponse,
- )
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelListResponse:
- """
- Lists the currently available models, and provides basic information about each
- one such as the owner and availability.
- """
- return self._get(
- "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelListResponse,
- )
-
class AsyncModelsResource(AsyncAPIResource):
@cached_property
@@ -135,76 +65,11 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
"""
return AsyncModelsResourceWithStreamingResponse(self)
- async def retrieve(
- self,
- model: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelRetrieveResponse:
- """
- Retrieves a model instance, providing basic information about the model such as
- the owner and permissioning.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not model:
- raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
- return await self._get(
- f"/models/{model}"
- if self._client._base_url_overridden
- else f"https://inference.do-ai.run/v1/models/{model}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelRetrieveResponse,
- )
-
- async def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelListResponse:
- """
- Lists the currently available models, and provides basic information about each
- one such as the owner and availability.
- """
- return await self._get(
- "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelListResponse,
- )
-
class ModelsResourceWithRawResponse:
def __init__(self, models: ModelsResource) -> None:
self._models = models
- self.retrieve = to_raw_response_wrapper(
- models.retrieve,
- )
- self.list = to_raw_response_wrapper(
- models.list,
- )
-
@cached_property
def providers(self) -> ProvidersResourceWithRawResponse:
return ProvidersResourceWithRawResponse(self._models.providers)
@@ -214,13 +79,6 @@ class AsyncModelsResourceWithRawResponse:
def __init__(self, models: AsyncModelsResource) -> None:
self._models = models
- self.retrieve = async_to_raw_response_wrapper(
- models.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- models.list,
- )
-
@cached_property
def providers(self) -> AsyncProvidersResourceWithRawResponse:
return AsyncProvidersResourceWithRawResponse(self._models.providers)
@@ -230,13 +88,6 @@ class ModelsResourceWithStreamingResponse:
def __init__(self, models: ModelsResource) -> None:
self._models = models
- self.retrieve = to_streamed_response_wrapper(
- models.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- models.list,
- )
-
@cached_property
def providers(self) -> ProvidersResourceWithStreamingResponse:
return ProvidersResourceWithStreamingResponse(self._models.providers)
@@ -246,13 +97,6 @@ class AsyncModelsResourceWithStreamingResponse:
def __init__(self, models: AsyncModelsResource) -> None:
self._models = models
- self.retrieve = async_to_streamed_response_wrapper(
- models.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- models.list,
- )
-
@cached_property
def providers(self) -> AsyncProvidersResourceWithStreamingResponse:
return AsyncProvidersResourceWithStreamingResponse(self._models.providers)
diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
index 9170b699..be03d4c5 100644
--- a/src/do_gradientai/types/__init__.py
+++ b/src/do_gradientai/types/__init__.py
@@ -57,7 +57,6 @@
from .agent_create_params import AgentCreateParams as AgentCreateParams
from .agent_list_response import AgentListResponse as AgentListResponse
from .agent_update_params import AgentUpdateParams as AgentUpdateParams
-from .model_list_response import ModelListResponse as ModelListResponse
from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod
from .region_list_response import RegionListResponse as RegionListResponse
from .agent_create_response import AgentCreateResponse as AgentCreateResponse
@@ -68,11 +67,7 @@
from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse
from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
from .gpu_droplet_list_params import GPUDropletListParams as GPUDropletListParams
-from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse
-from .agents.evaluation_metrics import workspaces # type: ignore # noqa: F401
-from .api_deployment_visibility import (
- APIDeploymentVisibility as APIDeploymentVisibility,
-)
+from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams
from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse
from .agent_update_status_params import (
diff --git a/src/do_gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py
deleted file mode 100644
index 5915bdd1..00000000
--- a/src/do_gradientai/types/model_list_response.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ModelListResponse", "Data"]
-
-
-class Data(BaseModel):
- id: str
- """The model identifier, which can be referenced in the API endpoints."""
-
- created: int
- """The Unix timestamp (in seconds) when the model was created."""
-
- object: Literal["model"]
- """The object type, which is always "model"."""
-
- owned_by: str
- """The organization that owns the model."""
-
-
-class ModelListResponse(BaseModel):
- data: List[Data]
-
- object: Literal["list"]
diff --git a/src/do_gradientai/types/model_retrieve_response.py b/src/do_gradientai/types/model_retrieve_response.py
deleted file mode 100644
index dd5de863..00000000
--- a/src/do_gradientai/types/model_retrieve_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from .._models import BaseModel
-
-__all__ = ["ModelRetrieveResponse"]
-
-
-class ModelRetrieveResponse(BaseModel):
- id: str
- """The model identifier, which can be referenced in the API endpoints."""
-
- created: int
- """The Unix timestamp (in seconds) when the model was created."""
-
- object: Literal["model"]
- """The object type, which is always "model"."""
-
- owned_by: str
- """The organization that owns the model."""
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
deleted file mode 100644
index 803c5d5a..00000000
--- a/tests/api_resources/test_models.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import ModelListResponse, ModelRetrieveResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestModels:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: GradientAI) -> None:
- model = client.models.retrieve(
- "llama3-8b-instruct",
- )
- assert_matches_type(ModelRetrieveResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.models.with_raw_response.retrieve(
- "llama3-8b-instruct",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = response.parse()
- assert_matches_type(ModelRetrieveResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.models.with_streaming_response.retrieve(
- "llama3-8b-instruct",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = response.parse()
- assert_matches_type(ModelRetrieveResponse, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
- client.models.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: GradientAI) -> None:
- model = client.models.list()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: GradientAI) -> None:
- response = client.models.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: GradientAI) -> None:
- with client.models.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncModels:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- model = await async_client.models.retrieve(
- "llama3-8b-instruct",
- )
- assert_matches_type(ModelRetrieveResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.with_raw_response.retrieve(
- "llama3-8b-instruct",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = await response.parse()
- assert_matches_type(ModelRetrieveResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.with_streaming_response.retrieve(
- "llama3-8b-instruct",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = await response.parse()
- assert_matches_type(ModelRetrieveResponse, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
- await async_client.models.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- model = await async_client.models.list()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = await response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = await response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
From 297a81fd81d72aee2bf934057749205275fbc85a Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 29 Jul 2025 19:02:45 +0000
Subject: [PATCH 02/10] codegen metadata
---
.stats.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.stats.yml b/.stats.yml
index b459a4a0..290b3f05 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 166
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml
openapi_spec_hash: d7d811c13cc79f15d82fe680cf425859
-config_hash: 5cf9c7359c13307780aa25d0203b0b35
+config_hash: 52c6ddda6cb14926853245342631bb62
From 2252d77e753a1407a1b851e01f4dcdbf1d4e0697 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 29 Jul 2025 19:04:17 +0000
Subject: [PATCH 03/10] feat(api): update via SDK Studio
---
.stats.yml | 4 +-
api.md | 58 --
src/do_gradientai/_client.py | 48 +-
src/do_gradientai/resources/__init__.py | 14 -
.../resources/models/__init__.py | 33 -
src/do_gradientai/resources/models/models.py | 102 ---
.../resources/models/providers/__init__.py | 47 --
.../resources/models/providers/anthropic.py | 711 ------------------
.../resources/models/providers/openai.py | 707 -----------------
.../resources/models/providers/providers.py | 134 ----
src/do_gradientai/types/__init__.py | 12 -
.../evaluation_metrics/model_list_response.py | 59 +-
src/do_gradientai/types/api_agent_model.py | 29 +-
src/do_gradientai/types/api_agreement.py | 17 -
src/do_gradientai/types/api_model.py | 42 --
src/do_gradientai/types/api_model_version.py | 18 -
src/do_gradientai/types/models/__init__.py | 3 -
.../types/models/providers/__init__.py | 24 -
.../providers/anthropic_create_params.py | 15 -
.../providers/anthropic_create_response.py | 13 -
.../providers/anthropic_delete_response.py | 13 -
.../providers/anthropic_list_agents_params.py | 15 -
.../anthropic_list_agents_response.py | 24 -
.../models/providers/anthropic_list_params.py | 15 -
.../providers/anthropic_list_response.py | 21 -
.../providers/anthropic_retrieve_response.py | 13 -
.../providers/anthropic_update_params.py | 20 -
.../providers/anthropic_update_response.py | 13 -
.../models/providers/openai_create_params.py | 15 -
.../providers/openai_create_response.py | 13 -
.../providers/openai_delete_response.py | 13 -
.../models/providers/openai_list_params.py | 15 -
.../models/providers/openai_list_response.py | 21 -
.../openai_retrieve_agents_params.py | 15 -
.../openai_retrieve_agents_response.py | 24 -
.../providers/openai_retrieve_response.py | 13 -
.../models/providers/openai_update_params.py | 20 -
.../providers/openai_update_response.py | 13 -
tests/api_resources/models/__init__.py | 1 -
.../models/providers/__init__.py | 1 -
.../models/providers/test_anthropic.py | 557 --------------
.../models/providers/test_openai.py | 557 --------------
42 files changed, 83 insertions(+), 3419 deletions(-)
delete mode 100644 src/do_gradientai/resources/models/__init__.py
delete mode 100644 src/do_gradientai/resources/models/models.py
delete mode 100644 src/do_gradientai/resources/models/providers/__init__.py
delete mode 100644 src/do_gradientai/resources/models/providers/anthropic.py
delete mode 100644 src/do_gradientai/resources/models/providers/openai.py
delete mode 100644 src/do_gradientai/resources/models/providers/providers.py
delete mode 100644 src/do_gradientai/types/api_agreement.py
delete mode 100644 src/do_gradientai/types/api_model.py
delete mode 100644 src/do_gradientai/types/api_model_version.py
delete mode 100644 src/do_gradientai/types/models/__init__.py
delete mode 100644 src/do_gradientai/types/models/providers/__init__.py
delete mode 100644 src/do_gradientai/types/models/providers/anthropic_create_params.py
delete mode 100644 src/do_gradientai/types/models/providers/anthropic_create_response.py
delete mode 100644 src/do_gradientai/types/models/providers/anthropic_delete_response.py
delete mode 100644 src/do_gradientai/types/models/providers/anthropic_list_agents_params.py
delete mode 100644 src/do_gradientai/types/models/providers/anthropic_list_agents_response.py
delete mode 100644 src/do_gradientai/types/models/providers/anthropic_list_params.py
delete mode 100644 src/do_gradientai/types/models/providers/anthropic_list_response.py
delete mode 100644 src/do_gradientai/types/models/providers/anthropic_retrieve_response.py
delete mode 100644 src/do_gradientai/types/models/providers/anthropic_update_params.py
delete mode 100644 src/do_gradientai/types/models/providers/anthropic_update_response.py
delete mode 100644 src/do_gradientai/types/models/providers/openai_create_params.py
delete mode 100644 src/do_gradientai/types/models/providers/openai_create_response.py
delete mode 100644 src/do_gradientai/types/models/providers/openai_delete_response.py
delete mode 100644 src/do_gradientai/types/models/providers/openai_list_params.py
delete mode 100644 src/do_gradientai/types/models/providers/openai_list_response.py
delete mode 100644 src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py
delete mode 100644 src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py
delete mode 100644 src/do_gradientai/types/models/providers/openai_retrieve_response.py
delete mode 100644 src/do_gradientai/types/models/providers/openai_update_params.py
delete mode 100644 src/do_gradientai/types/models/providers/openai_update_response.py
delete mode 100644 tests/api_resources/models/__init__.py
delete mode 100644 tests/api_resources/models/providers/__init__.py
delete mode 100644 tests/api_resources/models/providers/test_anthropic.py
delete mode 100644 tests/api_resources/models/providers/test_openai.py
diff --git a/.stats.yml b/.stats.yml
index 290b3f05..1b2683e0 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 166
+configured_endpoints: 154
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml
openapi_spec_hash: d7d811c13cc79f15d82fe680cf425859
-config_hash: 52c6ddda6cb14926853245342631bb62
+config_hash: 8833ecca3133e27ffb01c9c12013e938
diff --git a/api.md b/api.md
index 4460c4db..9e2ad010 100644
--- a/api.md
+++ b/api.md
@@ -795,64 +795,6 @@ Methods:
- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
-# Models
-
-Types:
-
-```python
-from do_gradientai.types import APIAgreement, APIModel, APIModelVersion
-```
-
-## Providers
-
-### Anthropic
-
-Types:
-
-```python
-from do_gradientai.types.models.providers import (
- AnthropicCreateResponse,
- AnthropicRetrieveResponse,
- AnthropicUpdateResponse,
- AnthropicListResponse,
- AnthropicDeleteResponse,
- AnthropicListAgentsResponse,
-)
-```
-
-Methods:
-
-- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse
-- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse
-- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse
-- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse
-- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse
-- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse
-
-### OpenAI
-
-Types:
-
-```python
-from do_gradientai.types.models.providers import (
- OpenAICreateResponse,
- OpenAIRetrieveResponse,
- OpenAIUpdateResponse,
- OpenAIListResponse,
- OpenAIDeleteResponse,
- OpenAIRetrieveAgentsResponse,
-)
-```
-
-Methods:
-
-- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse
-- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse
-- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse
-- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse
-- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse
-- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse
-
# Regions
Types:
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index e08e55b2..bd3b9908 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -32,15 +32,7 @@
)
if TYPE_CHECKING:
- from .resources import (
- chat,
- agents,
- models,
- regions,
- inference,
- gpu_droplets,
- knowledge_bases,
- )
+ from .resources import chat, agents, regions, inference, gpu_droplets, knowledge_bases
from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.gpu_droplets import (
@@ -56,8 +48,6 @@
load_balancers,
)
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
- from .resources.models.models import ModelsResource, AsyncModelsResource
- from .resources.gpu_droplets.sizes import SizesResource, AsyncSizesResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
from .resources.gpu_droplets.snapshots import (
SnapshotsResource,
@@ -220,12 +210,6 @@ def knowledge_bases(self) -> KnowledgeBasesResource:
return KnowledgeBasesResource(self)
- @cached_property
- def models(self) -> ModelsResource:
- from .resources.models import ModelsResource
-
- return ModelsResource(self)
-
@cached_property
def regions(self) -> RegionsResource:
from .resources.regions import RegionsResource
@@ -530,12 +514,6 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResource:
return AsyncKnowledgeBasesResource(self)
- @cached_property
- def models(self) -> AsyncModelsResource:
- from .resources.models import AsyncModelsResource
-
- return AsyncModelsResource(self)
-
@cached_property
def regions(self) -> AsyncRegionsResource:
from .resources.regions import AsyncRegionsResource
@@ -760,12 +738,6 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawRespon
return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)
- @cached_property
- def models(self) -> models.ModelsResourceWithRawResponse:
- from .resources.models import ModelsResourceWithRawResponse
-
- return ModelsResourceWithRawResponse(self._client.models)
-
@cached_property
def regions(self) -> regions.RegionsResourceWithRawResponse:
from .resources.regions import RegionsResourceWithRawResponse
@@ -861,12 +833,6 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawR
return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)
- @cached_property
- def models(self) -> models.AsyncModelsResourceWithRawResponse:
- from .resources.models import AsyncModelsResourceWithRawResponse
-
- return AsyncModelsResourceWithRawResponse(self._client.models)
-
@cached_property
def regions(self) -> regions.AsyncRegionsResourceWithRawResponse:
from .resources.regions import AsyncRegionsResourceWithRawResponse
@@ -968,12 +934,6 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreaming
return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)
- @cached_property
- def models(self) -> models.ModelsResourceWithStreamingResponse:
- from .resources.models import ModelsResourceWithStreamingResponse
-
- return ModelsResourceWithStreamingResponse(self._client.models)
-
@cached_property
def regions(self) -> regions.RegionsResourceWithStreamingResponse:
from .resources.regions import RegionsResourceWithStreamingResponse
@@ -1075,12 +1035,6 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStre
return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)
- @cached_property
- def models(self) -> models.AsyncModelsResourceWithStreamingResponse:
- from .resources.models import AsyncModelsResourceWithStreamingResponse
-
- return AsyncModelsResourceWithStreamingResponse(self._client.models)
-
@cached_property
def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse:
from .resources.regions import AsyncRegionsResourceWithStreamingResponse
diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py
index 45abd6a3..55df767c 100644
--- a/src/do_gradientai/resources/__init__.py
+++ b/src/do_gradientai/resources/__init__.py
@@ -16,14 +16,6 @@
AgentsResourceWithStreamingResponse,
AsyncAgentsResourceWithStreamingResponse,
)
-from .models import (
- ModelsResource,
- AsyncModelsResource,
- ModelsResourceWithRawResponse,
- AsyncModelsResourceWithRawResponse,
- ModelsResourceWithStreamingResponse,
- AsyncModelsResourceWithStreamingResponse,
-)
from .regions import (
RegionsResource,
AsyncRegionsResource,
@@ -88,12 +80,6 @@
"AsyncKnowledgeBasesResourceWithRawResponse",
"KnowledgeBasesResourceWithStreamingResponse",
"AsyncKnowledgeBasesResourceWithStreamingResponse",
- "ModelsResource",
- "AsyncModelsResource",
- "ModelsResourceWithRawResponse",
- "AsyncModelsResourceWithRawResponse",
- "ModelsResourceWithStreamingResponse",
- "AsyncModelsResourceWithStreamingResponse",
"RegionsResource",
"AsyncRegionsResource",
"RegionsResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/models/__init__.py b/src/do_gradientai/resources/models/__init__.py
deleted file mode 100644
index e30dd201..00000000
--- a/src/do_gradientai/resources/models/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .models import (
- ModelsResource,
- AsyncModelsResource,
- ModelsResourceWithRawResponse,
- AsyncModelsResourceWithRawResponse,
- ModelsResourceWithStreamingResponse,
- AsyncModelsResourceWithStreamingResponse,
-)
-from .providers import (
- ProvidersResource,
- AsyncProvidersResource,
- ProvidersResourceWithRawResponse,
- AsyncProvidersResourceWithRawResponse,
- ProvidersResourceWithStreamingResponse,
- AsyncProvidersResourceWithStreamingResponse,
-)
-
-__all__ = [
- "ProvidersResource",
- "AsyncProvidersResource",
- "ProvidersResourceWithRawResponse",
- "AsyncProvidersResourceWithRawResponse",
- "ProvidersResourceWithStreamingResponse",
- "AsyncProvidersResourceWithStreamingResponse",
- "ModelsResource",
- "AsyncModelsResource",
- "ModelsResourceWithRawResponse",
- "AsyncModelsResourceWithRawResponse",
- "ModelsResourceWithStreamingResponse",
- "AsyncModelsResourceWithStreamingResponse",
-]
diff --git a/src/do_gradientai/resources/models/models.py b/src/do_gradientai/resources/models/models.py
deleted file mode 100644
index d98f59cf..00000000
--- a/src/do_gradientai/resources/models/models.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from .providers.providers import (
- ProvidersResource,
- AsyncProvidersResource,
- ProvidersResourceWithRawResponse,
- AsyncProvidersResourceWithRawResponse,
- ProvidersResourceWithStreamingResponse,
- AsyncProvidersResourceWithStreamingResponse,
-)
-
-__all__ = ["ModelsResource", "AsyncModelsResource"]
-
-
-class ModelsResource(SyncAPIResource):
- @cached_property
- def providers(self) -> ProvidersResource:
- return ProvidersResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> ModelsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return ModelsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return ModelsResourceWithStreamingResponse(self)
-
-
-class AsyncModelsResource(AsyncAPIResource):
- @cached_property
- def providers(self) -> AsyncProvidersResource:
- return AsyncProvidersResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncModelsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncModelsResourceWithStreamingResponse(self)
-
-
-class ModelsResourceWithRawResponse:
- def __init__(self, models: ModelsResource) -> None:
- self._models = models
-
- @cached_property
- def providers(self) -> ProvidersResourceWithRawResponse:
- return ProvidersResourceWithRawResponse(self._models.providers)
-
-
-class AsyncModelsResourceWithRawResponse:
- def __init__(self, models: AsyncModelsResource) -> None:
- self._models = models
-
- @cached_property
- def providers(self) -> AsyncProvidersResourceWithRawResponse:
- return AsyncProvidersResourceWithRawResponse(self._models.providers)
-
-
-class ModelsResourceWithStreamingResponse:
- def __init__(self, models: ModelsResource) -> None:
- self._models = models
-
- @cached_property
- def providers(self) -> ProvidersResourceWithStreamingResponse:
- return ProvidersResourceWithStreamingResponse(self._models.providers)
-
-
-class AsyncModelsResourceWithStreamingResponse:
- def __init__(self, models: AsyncModelsResource) -> None:
- self._models = models
-
- @cached_property
- def providers(self) -> AsyncProvidersResourceWithStreamingResponse:
- return AsyncProvidersResourceWithStreamingResponse(self._models.providers)
diff --git a/src/do_gradientai/resources/models/providers/__init__.py b/src/do_gradientai/resources/models/providers/__init__.py
deleted file mode 100644
index 1731e057..00000000
--- a/src/do_gradientai/resources/models/providers/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .openai import (
- OpenAIResource,
- AsyncOpenAIResource,
- OpenAIResourceWithRawResponse,
- AsyncOpenAIResourceWithRawResponse,
- OpenAIResourceWithStreamingResponse,
- AsyncOpenAIResourceWithStreamingResponse,
-)
-from .anthropic import (
- AnthropicResource,
- AsyncAnthropicResource,
- AnthropicResourceWithRawResponse,
- AsyncAnthropicResourceWithRawResponse,
- AnthropicResourceWithStreamingResponse,
- AsyncAnthropicResourceWithStreamingResponse,
-)
-from .providers import (
- ProvidersResource,
- AsyncProvidersResource,
- ProvidersResourceWithRawResponse,
- AsyncProvidersResourceWithRawResponse,
- ProvidersResourceWithStreamingResponse,
- AsyncProvidersResourceWithStreamingResponse,
-)
-
-__all__ = [
- "AnthropicResource",
- "AsyncAnthropicResource",
- "AnthropicResourceWithRawResponse",
- "AsyncAnthropicResourceWithRawResponse",
- "AnthropicResourceWithStreamingResponse",
- "AsyncAnthropicResourceWithStreamingResponse",
- "OpenAIResource",
- "AsyncOpenAIResource",
- "OpenAIResourceWithRawResponse",
- "AsyncOpenAIResourceWithRawResponse",
- "OpenAIResourceWithStreamingResponse",
- "AsyncOpenAIResourceWithStreamingResponse",
- "ProvidersResource",
- "AsyncProvidersResource",
- "ProvidersResourceWithRawResponse",
- "AsyncProvidersResourceWithRawResponse",
- "ProvidersResourceWithStreamingResponse",
- "AsyncProvidersResourceWithStreamingResponse",
-]
diff --git a/src/do_gradientai/resources/models/providers/anthropic.py b/src/do_gradientai/resources/models/providers/anthropic.py
deleted file mode 100644
index e570be51..00000000
--- a/src/do_gradientai/resources/models/providers/anthropic.py
+++ /dev/null
@@ -1,711 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.models.providers import (
- anthropic_list_params,
- anthropic_create_params,
- anthropic_update_params,
- anthropic_list_agents_params,
-)
-from ....types.models.providers.anthropic_list_response import AnthropicListResponse
-from ....types.models.providers.anthropic_create_response import AnthropicCreateResponse
-from ....types.models.providers.anthropic_delete_response import AnthropicDeleteResponse
-from ....types.models.providers.anthropic_update_response import AnthropicUpdateResponse
-from ....types.models.providers.anthropic_retrieve_response import AnthropicRetrieveResponse
-from ....types.models.providers.anthropic_list_agents_response import AnthropicListAgentsResponse
-
-__all__ = ["AnthropicResource", "AsyncAnthropicResource"]
-
-
-class AnthropicResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AnthropicResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AnthropicResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AnthropicResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicCreateResponse:
- """
- To create an Anthropic API key, send a POST request to
- `/v2/gen-ai/anthropic/keys`.
-
- Args:
- api_key: Anthropic API key
-
- name: Name of the key
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v2/gen-ai/anthropic/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
- body=maybe_transform(
- {
- "api_key": api_key,
- "name": name,
- },
- anthropic_create_params.AnthropicCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AnthropicCreateResponse,
- )
-
- def retrieve(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicRetrieveResponse:
- """
- To retrieve details of an Anthropic API key, send a GET request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return self._get(
- f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AnthropicRetrieveResponse,
- )
-
- def update(
- self,
- path_api_key_uuid: str,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- body_api_key_uuid: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicUpdateResponse:
- """
- To update an Anthropic API key, send a PUT request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- api_key: Anthropic API key
-
- body_api_key_uuid: API key ID
-
- name: Name of the key
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not path_api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
- return self._put(
- f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
- body=maybe_transform(
- {
- "api_key": api_key,
- "body_api_key_uuid": body_api_key_uuid,
- "name": name,
- },
- anthropic_update_params.AnthropicUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AnthropicUpdateResponse,
- )
-
- def list(
- self,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicListResponse:
- """
- To list all Anthropic API keys, send a GET request to
- `/v2/gen-ai/anthropic/keys`.
-
- Args:
- page: Page number.
-
- per_page: Items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/v2/gen-ai/anthropic/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- anthropic_list_params.AnthropicListParams,
- ),
- ),
- cast_to=AnthropicListResponse,
- )
-
- def delete(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicDeleteResponse:
- """
- To delete an Anthropic API key, send a DELETE request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return self._delete(
- f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AnthropicDeleteResponse,
- )
-
- def list_agents(
- self,
- uuid: str,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicListAgentsResponse:
- """
- List Agents by Anthropic Key.
-
- Args:
- page: Page number.
-
- per_page: Items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
- return self._get(
- f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- anthropic_list_agents_params.AnthropicListAgentsParams,
- ),
- ),
- cast_to=AnthropicListAgentsResponse,
- )
-
-
-class AsyncAnthropicResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncAnthropicResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncAnthropicResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicCreateResponse:
- """
- To create an Anthropic API key, send a POST request to
- `/v2/gen-ai/anthropic/keys`.
-
- Args:
- api_key: Anthropic API key
-
- name: Name of the key
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v2/gen-ai/anthropic/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
- body=await async_maybe_transform(
- {
- "api_key": api_key,
- "name": name,
- },
- anthropic_create_params.AnthropicCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AnthropicCreateResponse,
- )
-
- async def retrieve(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicRetrieveResponse:
- """
- To retrieve details of an Anthropic API key, send a GET request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return await self._get(
- f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AnthropicRetrieveResponse,
- )
-
- async def update(
- self,
- path_api_key_uuid: str,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- body_api_key_uuid: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicUpdateResponse:
- """
- To update an Anthropic API key, send a PUT request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- api_key: Anthropic API key
-
- body_api_key_uuid: API key ID
-
- name: Name of the key
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not path_api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
- return await self._put(
- f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
- body=await async_maybe_transform(
- {
- "api_key": api_key,
- "body_api_key_uuid": body_api_key_uuid,
- "name": name,
- },
- anthropic_update_params.AnthropicUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AnthropicUpdateResponse,
- )
-
- async def list(
- self,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicListResponse:
- """
- To list all Anthropic API keys, send a GET request to
- `/v2/gen-ai/anthropic/keys`.
-
- Args:
- page: Page number.
-
- per_page: Items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/v2/gen-ai/anthropic/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- anthropic_list_params.AnthropicListParams,
- ),
- ),
- cast_to=AnthropicListResponse,
- )
-
- async def delete(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicDeleteResponse:
- """
- To delete an Anthropic API key, send a DELETE request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return await self._delete(
- f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=AnthropicDeleteResponse,
- )
-
- async def list_agents(
- self,
- uuid: str,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> AnthropicListAgentsResponse:
- """
- List Agents by Anthropic Key.
-
- Args:
- page: Page number.
-
- per_page: Items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
- return await self._get(
- f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- anthropic_list_agents_params.AnthropicListAgentsParams,
- ),
- ),
- cast_to=AnthropicListAgentsResponse,
- )
-
-
-class AnthropicResourceWithRawResponse:
- def __init__(self, anthropic: AnthropicResource) -> None:
- self._anthropic = anthropic
-
- self.create = to_raw_response_wrapper(
- anthropic.create,
- )
- self.retrieve = to_raw_response_wrapper(
- anthropic.retrieve,
- )
- self.update = to_raw_response_wrapper(
- anthropic.update,
- )
- self.list = to_raw_response_wrapper(
- anthropic.list,
- )
- self.delete = to_raw_response_wrapper(
- anthropic.delete,
- )
- self.list_agents = to_raw_response_wrapper(
- anthropic.list_agents,
- )
-
-
-class AsyncAnthropicResourceWithRawResponse:
- def __init__(self, anthropic: AsyncAnthropicResource) -> None:
- self._anthropic = anthropic
-
- self.create = async_to_raw_response_wrapper(
- anthropic.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- anthropic.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- anthropic.update,
- )
- self.list = async_to_raw_response_wrapper(
- anthropic.list,
- )
- self.delete = async_to_raw_response_wrapper(
- anthropic.delete,
- )
- self.list_agents = async_to_raw_response_wrapper(
- anthropic.list_agents,
- )
-
-
-class AnthropicResourceWithStreamingResponse:
- def __init__(self, anthropic: AnthropicResource) -> None:
- self._anthropic = anthropic
-
- self.create = to_streamed_response_wrapper(
- anthropic.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- anthropic.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- anthropic.update,
- )
- self.list = to_streamed_response_wrapper(
- anthropic.list,
- )
- self.delete = to_streamed_response_wrapper(
- anthropic.delete,
- )
- self.list_agents = to_streamed_response_wrapper(
- anthropic.list_agents,
- )
-
-
-class AsyncAnthropicResourceWithStreamingResponse:
- def __init__(self, anthropic: AsyncAnthropicResource) -> None:
- self._anthropic = anthropic
-
- self.create = async_to_streamed_response_wrapper(
- anthropic.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- anthropic.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- anthropic.update,
- )
- self.list = async_to_streamed_response_wrapper(
- anthropic.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- anthropic.delete,
- )
- self.list_agents = async_to_streamed_response_wrapper(
- anthropic.list_agents,
- )
diff --git a/src/do_gradientai/resources/models/providers/openai.py b/src/do_gradientai/resources/models/providers/openai.py
deleted file mode 100644
index ccd594b8..00000000
--- a/src/do_gradientai/resources/models/providers/openai.py
+++ /dev/null
@@ -1,707 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.models.providers import (
- openai_list_params,
- openai_create_params,
- openai_update_params,
- openai_retrieve_agents_params,
-)
-from ....types.models.providers.openai_list_response import OpenAIListResponse
-from ....types.models.providers.openai_create_response import OpenAICreateResponse
-from ....types.models.providers.openai_delete_response import OpenAIDeleteResponse
-from ....types.models.providers.openai_update_response import OpenAIUpdateResponse
-from ....types.models.providers.openai_retrieve_response import OpenAIRetrieveResponse
-from ....types.models.providers.openai_retrieve_agents_response import OpenAIRetrieveAgentsResponse
-
-__all__ = ["OpenAIResource", "AsyncOpenAIResource"]
-
-
-class OpenAIResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> OpenAIResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return OpenAIResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return OpenAIResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAICreateResponse:
- """
- To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
-
- Args:
- api_key: OpenAI API key
-
- name: Name of the key
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v2/gen-ai/openai/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
- body=maybe_transform(
- {
- "api_key": api_key,
- "name": name,
- },
- openai_create_params.OpenAICreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAICreateResponse,
- )
-
- def retrieve(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIRetrieveResponse:
- """
- To retrieve details of an OpenAI API key, send a GET request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return self._get(
- f"/v2/gen-ai/openai/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIRetrieveResponse,
- )
-
- def update(
- self,
- path_api_key_uuid: str,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- body_api_key_uuid: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIUpdateResponse:
- """
- To update an OpenAI API key, send a PUT request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- api_key: OpenAI API key
-
- body_api_key_uuid: API key ID
-
- name: Name of the key
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not path_api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
- return self._put(
- f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
- body=maybe_transform(
- {
- "api_key": api_key,
- "body_api_key_uuid": body_api_key_uuid,
- "name": name,
- },
- openai_update_params.OpenAIUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIUpdateResponse,
- )
-
- def list(
- self,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIListResponse:
- """
- To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
-
- Args:
- page: Page number.
-
- per_page: Items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/v2/gen-ai/openai/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- openai_list_params.OpenAIListParams,
- ),
- ),
- cast_to=OpenAIListResponse,
- )
-
- def delete(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIDeleteResponse:
- """
- To delete an OpenAI API key, send a DELETE request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return self._delete(
- f"/v2/gen-ai/openai/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIDeleteResponse,
- )
-
- def retrieve_agents(
- self,
- uuid: str,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIRetrieveAgentsResponse:
- """
- List Agents by OpenAI Key.
-
- Args:
- page: Page number.
-
- per_page: Items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
- return self._get(
- f"/v2/gen-ai/openai/keys/{uuid}/agents"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- openai_retrieve_agents_params.OpenAIRetrieveAgentsParams,
- ),
- ),
- cast_to=OpenAIRetrieveAgentsResponse,
- )
-
-
-class AsyncOpenAIResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncOpenAIResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncOpenAIResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAICreateResponse:
- """
- To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
-
- Args:
- api_key: OpenAI API key
-
- name: Name of the key
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v2/gen-ai/openai/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
- body=await async_maybe_transform(
- {
- "api_key": api_key,
- "name": name,
- },
- openai_create_params.OpenAICreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAICreateResponse,
- )
-
- async def retrieve(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIRetrieveResponse:
- """
- To retrieve details of an OpenAI API key, send a GET request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return await self._get(
- f"/v2/gen-ai/openai/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIRetrieveResponse,
- )
-
- async def update(
- self,
- path_api_key_uuid: str,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- body_api_key_uuid: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIUpdateResponse:
- """
- To update an OpenAI API key, send a PUT request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- api_key: OpenAI API key
-
- body_api_key_uuid: API key ID
-
- name: Name of the key
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not path_api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
- return await self._put(
- f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
- body=await async_maybe_transform(
- {
- "api_key": api_key,
- "body_api_key_uuid": body_api_key_uuid,
- "name": name,
- },
- openai_update_params.OpenAIUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIUpdateResponse,
- )
-
- async def list(
- self,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIListResponse:
- """
- To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
-
- Args:
- page: Page number.
-
- per_page: Items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/v2/gen-ai/openai/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- openai_list_params.OpenAIListParams,
- ),
- ),
- cast_to=OpenAIListResponse,
- )
-
- async def delete(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIDeleteResponse:
- """
- To delete an OpenAI API key, send a DELETE request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return await self._delete(
- f"/v2/gen-ai/openai/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=OpenAIDeleteResponse,
- )
-
- async def retrieve_agents(
- self,
- uuid: str,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> OpenAIRetrieveAgentsResponse:
- """
- List Agents by OpenAI Key.
-
- Args:
- page: Page number.
-
- per_page: Items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
- return await self._get(
- f"/v2/gen-ai/openai/keys/{uuid}/agents"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- openai_retrieve_agents_params.OpenAIRetrieveAgentsParams,
- ),
- ),
- cast_to=OpenAIRetrieveAgentsResponse,
- )
-
-
-class OpenAIResourceWithRawResponse:
- def __init__(self, openai: OpenAIResource) -> None:
- self._openai = openai
-
- self.create = to_raw_response_wrapper(
- openai.create,
- )
- self.retrieve = to_raw_response_wrapper(
- openai.retrieve,
- )
- self.update = to_raw_response_wrapper(
- openai.update,
- )
- self.list = to_raw_response_wrapper(
- openai.list,
- )
- self.delete = to_raw_response_wrapper(
- openai.delete,
- )
- self.retrieve_agents = to_raw_response_wrapper(
- openai.retrieve_agents,
- )
-
-
-class AsyncOpenAIResourceWithRawResponse:
- def __init__(self, openai: AsyncOpenAIResource) -> None:
- self._openai = openai
-
- self.create = async_to_raw_response_wrapper(
- openai.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- openai.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- openai.update,
- )
- self.list = async_to_raw_response_wrapper(
- openai.list,
- )
- self.delete = async_to_raw_response_wrapper(
- openai.delete,
- )
- self.retrieve_agents = async_to_raw_response_wrapper(
- openai.retrieve_agents,
- )
-
-
-class OpenAIResourceWithStreamingResponse:
- def __init__(self, openai: OpenAIResource) -> None:
- self._openai = openai
-
- self.create = to_streamed_response_wrapper(
- openai.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- openai.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- openai.update,
- )
- self.list = to_streamed_response_wrapper(
- openai.list,
- )
- self.delete = to_streamed_response_wrapper(
- openai.delete,
- )
- self.retrieve_agents = to_streamed_response_wrapper(
- openai.retrieve_agents,
- )
-
-
-class AsyncOpenAIResourceWithStreamingResponse:
- def __init__(self, openai: AsyncOpenAIResource) -> None:
- self._openai = openai
-
- self.create = async_to_streamed_response_wrapper(
- openai.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- openai.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- openai.update,
- )
- self.list = async_to_streamed_response_wrapper(
- openai.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- openai.delete,
- )
- self.retrieve_agents = async_to_streamed_response_wrapper(
- openai.retrieve_agents,
- )
diff --git a/src/do_gradientai/resources/models/providers/providers.py b/src/do_gradientai/resources/models/providers/providers.py
deleted file mode 100644
index 3e3f4dde..00000000
--- a/src/do_gradientai/resources/models/providers/providers.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .openai import (
- OpenAIResource,
- AsyncOpenAIResource,
- OpenAIResourceWithRawResponse,
- AsyncOpenAIResourceWithRawResponse,
- OpenAIResourceWithStreamingResponse,
- AsyncOpenAIResourceWithStreamingResponse,
-)
-from .anthropic import (
- AnthropicResource,
- AsyncAnthropicResource,
- AnthropicResourceWithRawResponse,
- AsyncAnthropicResourceWithRawResponse,
- AnthropicResourceWithStreamingResponse,
- AsyncAnthropicResourceWithStreamingResponse,
-)
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-
-__all__ = ["ProvidersResource", "AsyncProvidersResource"]
-
-
-class ProvidersResource(SyncAPIResource):
- @cached_property
- def anthropic(self) -> AnthropicResource:
- return AnthropicResource(self._client)
-
- @cached_property
- def openai(self) -> OpenAIResource:
- return OpenAIResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> ProvidersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return ProvidersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return ProvidersResourceWithStreamingResponse(self)
-
-
-class AsyncProvidersResource(AsyncAPIResource):
- @cached_property
- def anthropic(self) -> AsyncAnthropicResource:
- return AsyncAnthropicResource(self._client)
-
- @cached_property
- def openai(self) -> AsyncOpenAIResource:
- return AsyncOpenAIResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncProvidersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncProvidersResourceWithStreamingResponse(self)
-
-
-class ProvidersResourceWithRawResponse:
- def __init__(self, providers: ProvidersResource) -> None:
- self._providers = providers
-
- @cached_property
- def anthropic(self) -> AnthropicResourceWithRawResponse:
- return AnthropicResourceWithRawResponse(self._providers.anthropic)
-
- @cached_property
- def openai(self) -> OpenAIResourceWithRawResponse:
- return OpenAIResourceWithRawResponse(self._providers.openai)
-
-
-class AsyncProvidersResourceWithRawResponse:
- def __init__(self, providers: AsyncProvidersResource) -> None:
- self._providers = providers
-
- @cached_property
- def anthropic(self) -> AsyncAnthropicResourceWithRawResponse:
- return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic)
-
- @cached_property
- def openai(self) -> AsyncOpenAIResourceWithRawResponse:
- return AsyncOpenAIResourceWithRawResponse(self._providers.openai)
-
-
-class ProvidersResourceWithStreamingResponse:
- def __init__(self, providers: ProvidersResource) -> None:
- self._providers = providers
-
- @cached_property
- def anthropic(self) -> AnthropicResourceWithStreamingResponse:
- return AnthropicResourceWithStreamingResponse(self._providers.anthropic)
-
- @cached_property
- def openai(self) -> OpenAIResourceWithStreamingResponse:
- return OpenAIResourceWithStreamingResponse(self._providers.openai)
-
-
-class AsyncProvidersResourceWithStreamingResponse:
- def __init__(self, providers: AsyncProvidersResource) -> None:
- self._providers = providers
-
- @cached_property
- def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse:
- return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic)
-
- @cached_property
- def openai(self) -> AsyncOpenAIResourceWithStreamingResponse:
- return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai)
diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
index be03d4c5..56e94b41 100644
--- a/src/do_gradientai/types/__init__.py
+++ b/src/do_gradientai/types/__init__.py
@@ -4,7 +4,6 @@
from . import (
agents,
- models,
api_agent,
api_workspace,
agent_create_response,
@@ -46,12 +45,9 @@
ChatCompletionTokenLogprob as ChatCompletionTokenLogprob,
)
from .api_agent import APIAgent as APIAgent
-from .api_model import APIModel as APIModel
-from .api_agreement import APIAgreement as APIAgreement
from .api_workspace import APIWorkspace as APIWorkspace
from .api_agent_model import APIAgentModel as APIAgentModel
from .agent_list_params import AgentListParams as AgentListParams
-from .api_model_version import APIModelVersion as APIModelVersion
from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase
from .region_list_params import RegionListParams as RegionListParams
from .agent_create_params import AgentCreateParams as AgentCreateParams
@@ -191,12 +187,6 @@
agents.route_view_response.RouteViewResponse.model_rebuild(
_parent_namespace_depth=0
)
- models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.model_rebuild(
- _parent_namespace_depth=0
- )
- models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.model_rebuild(
- _parent_namespace_depth=0
- )
else:
api_agent.APIAgent.update_forward_refs() # type: ignore
api_workspace.APIWorkspace.update_forward_refs() # type: ignore
@@ -217,5 +207,3 @@
agents.api_link_knowledge_base_output.APILinkKnowledgeBaseOutput.update_forward_refs() # type: ignore
agents.knowledge_base_detach_response.KnowledgeBaseDetachResponse.update_forward_refs() # type: ignore
agents.route_view_response.RouteViewResponse.update_forward_refs() # type: ignore
- models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.update_forward_refs() # type: ignore
- models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.update_forward_refs() # type: ignore
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py
index 2fc17524..160e0aa3 100644
--- a/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py
+++ b/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py
@@ -1,13 +1,66 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
+from datetime import datetime
from ...._models import BaseModel
-from ...api_model import APIModel
from ...shared.api_meta import APIMeta
from ...shared.api_links import APILinks
-__all__ = ["ModelListResponse"]
+__all__ = ["ModelListResponse", "Model", "ModelAgreement", "ModelVersion"]
+
+
+class ModelAgreement(BaseModel):
+ description: Optional[str] = None
+
+ name: Optional[str] = None
+
+ url: Optional[str] = None
+
+ uuid: Optional[str] = None
+
+
+class ModelVersion(BaseModel):
+ major: Optional[int] = None
+ """Major version number"""
+
+ minor: Optional[int] = None
+ """Minor version number"""
+
+ patch: Optional[int] = None
+ """Patch version number"""
+
+
+class Model(BaseModel):
+ agreement: Optional[ModelAgreement] = None
+ """Agreement Description"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ is_foundational: Optional[bool] = None
+ """True if it is a foundational model provided by do"""
+
+ name: Optional[str] = None
+ """Name of the model"""
+
+ parent_uuid: Optional[str] = None
+ """Unique id of the model, this model is based on"""
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ upload_complete: Optional[bool] = None
+ """Model has been fully uploaded"""
+
+ url: Optional[str] = None
+ """Download url"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
+
+ version: Optional[ModelVersion] = None
+ """Version Information about a Model"""
class ModelListResponse(BaseModel):
@@ -17,5 +70,5 @@ class ModelListResponse(BaseModel):
meta: Optional[APIMeta] = None
"""Meta information about the data set"""
- models: Optional[List[APIModel]] = None
+ models: Optional[List[Model]] = None
"""The models"""
diff --git a/src/do_gradientai/types/api_agent_model.py b/src/do_gradientai/types/api_agent_model.py
index f111bfb7..588f1cd0 100644
--- a/src/do_gradientai/types/api_agent_model.py
+++ b/src/do_gradientai/types/api_agent_model.py
@@ -5,14 +5,33 @@
from typing_extensions import Literal
from .._models import BaseModel
-from .api_agreement import APIAgreement
-from .api_model_version import APIModelVersion
-__all__ = ["APIAgentModel"]
+__all__ = ["APIAgentModel", "Agreement", "Version"]
+
+
+class Agreement(BaseModel):
+ description: Optional[str] = None
+
+ name: Optional[str] = None
+
+ url: Optional[str] = None
+
+ uuid: Optional[str] = None
+
+
+class Version(BaseModel):
+ major: Optional[int] = None
+ """Major version number"""
+
+ minor: Optional[int] = None
+ """Minor version number"""
+
+ patch: Optional[int] = None
+ """Patch version number"""
class APIAgentModel(BaseModel):
- agreement: Optional[APIAgreement] = None
+ agreement: Optional[Agreement] = None
"""Agreement Description"""
created_at: Optional[datetime] = None
@@ -67,5 +86,5 @@ class APIAgentModel(BaseModel):
uuid: Optional[str] = None
"""Unique id"""
- version: Optional[APIModelVersion] = None
+ version: Optional[Version] = None
"""Version Information about a Model"""
diff --git a/src/do_gradientai/types/api_agreement.py b/src/do_gradientai/types/api_agreement.py
deleted file mode 100644
index c4359f1f..00000000
--- a/src/do_gradientai/types/api_agreement.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from .._models import BaseModel
-
-__all__ = ["APIAgreement"]
-
-
-class APIAgreement(BaseModel):
- description: Optional[str] = None
-
- name: Optional[str] = None
-
- url: Optional[str] = None
-
- uuid: Optional[str] = None
diff --git a/src/do_gradientai/types/api_model.py b/src/do_gradientai/types/api_model.py
deleted file mode 100644
index 7c530ee2..00000000
--- a/src/do_gradientai/types/api_model.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from datetime import datetime
-
-from .._models import BaseModel
-from .api_agreement import APIAgreement
-from .api_model_version import APIModelVersion
-
-__all__ = ["APIModel"]
-
-
-class APIModel(BaseModel):
- agreement: Optional[APIAgreement] = None
- """Agreement Description"""
-
- created_at: Optional[datetime] = None
- """Creation date / time"""
-
- is_foundational: Optional[bool] = None
- """True if it is a foundational model provided by do"""
-
- name: Optional[str] = None
- """Name of the model"""
-
- parent_uuid: Optional[str] = None
- """Unique id of the model, this model is based on"""
-
- updated_at: Optional[datetime] = None
- """Last modified"""
-
- upload_complete: Optional[bool] = None
- """Model has been fully uploaded"""
-
- url: Optional[str] = None
- """Download url"""
-
- uuid: Optional[str] = None
- """Unique id"""
-
- version: Optional[APIModelVersion] = None
- """Version Information about a Model"""
diff --git a/src/do_gradientai/types/api_model_version.py b/src/do_gradientai/types/api_model_version.py
deleted file mode 100644
index f19a78c6..00000000
--- a/src/do_gradientai/types/api_model_version.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from .._models import BaseModel
-
-__all__ = ["APIModelVersion"]
-
-
-class APIModelVersion(BaseModel):
- major: Optional[int] = None
- """Major version number"""
-
- minor: Optional[int] = None
- """Minor version number"""
-
- patch: Optional[int] = None
- """Patch version number"""
diff --git a/src/do_gradientai/types/models/__init__.py b/src/do_gradientai/types/models/__init__.py
deleted file mode 100644
index f8ee8b14..00000000
--- a/src/do_gradientai/types/models/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
diff --git a/src/do_gradientai/types/models/providers/__init__.py b/src/do_gradientai/types/models/providers/__init__.py
deleted file mode 100644
index 74366e70..00000000
--- a/src/do_gradientai/types/models/providers/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .openai_list_params import OpenAIListParams as OpenAIListParams
-from .openai_create_params import OpenAICreateParams as OpenAICreateParams
-from .openai_list_response import OpenAIListResponse as OpenAIListResponse
-from .openai_update_params import OpenAIUpdateParams as OpenAIUpdateParams
-from .anthropic_list_params import AnthropicListParams as AnthropicListParams
-from .openai_create_response import OpenAICreateResponse as OpenAICreateResponse
-from .openai_delete_response import OpenAIDeleteResponse as OpenAIDeleteResponse
-from .openai_update_response import OpenAIUpdateResponse as OpenAIUpdateResponse
-from .anthropic_create_params import AnthropicCreateParams as AnthropicCreateParams
-from .anthropic_list_response import AnthropicListResponse as AnthropicListResponse
-from .anthropic_update_params import AnthropicUpdateParams as AnthropicUpdateParams
-from .openai_retrieve_response import OpenAIRetrieveResponse as OpenAIRetrieveResponse
-from .anthropic_create_response import AnthropicCreateResponse as AnthropicCreateResponse
-from .anthropic_delete_response import AnthropicDeleteResponse as AnthropicDeleteResponse
-from .anthropic_update_response import AnthropicUpdateResponse as AnthropicUpdateResponse
-from .anthropic_retrieve_response import AnthropicRetrieveResponse as AnthropicRetrieveResponse
-from .anthropic_list_agents_params import AnthropicListAgentsParams as AnthropicListAgentsParams
-from .openai_retrieve_agents_params import OpenAIRetrieveAgentsParams as OpenAIRetrieveAgentsParams
-from .anthropic_list_agents_response import AnthropicListAgentsResponse as AnthropicListAgentsResponse
-from .openai_retrieve_agents_response import OpenAIRetrieveAgentsResponse as OpenAIRetrieveAgentsResponse
diff --git a/src/do_gradientai/types/models/providers/anthropic_create_params.py b/src/do_gradientai/types/models/providers/anthropic_create_params.py
deleted file mode 100644
index c9fd6e85..00000000
--- a/src/do_gradientai/types/models/providers/anthropic_create_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["AnthropicCreateParams"]
-
-
-class AnthropicCreateParams(TypedDict, total=False):
- api_key: str
- """Anthropic API key"""
-
- name: str
- """Name of the key"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_create_response.py b/src/do_gradientai/types/models/providers/anthropic_create_response.py
deleted file mode 100644
index 0fbe50bc..00000000
--- a/src/do_gradientai/types/models/providers/anthropic_create_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
-
-__all__ = ["AnthropicCreateResponse"]
-
-
-class AnthropicCreateResponse(BaseModel):
- api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
- """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_delete_response.py b/src/do_gradientai/types/models/providers/anthropic_delete_response.py
deleted file mode 100644
index b4fdd978..00000000
--- a/src/do_gradientai/types/models/providers/anthropic_delete_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
-
-__all__ = ["AnthropicDeleteResponse"]
-
-
-class AnthropicDeleteResponse(BaseModel):
- api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
- """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py b/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py
deleted file mode 100644
index b3308b69..00000000
--- a/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["AnthropicListAgentsParams"]
-
-
-class AnthropicListAgentsParams(TypedDict, total=False):
- page: int
- """Page number."""
-
- per_page: int
- """Items per page."""
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py b/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py
deleted file mode 100644
index a1525275..00000000
--- a/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Optional
-
-from ...._models import BaseModel
-from ...shared.api_meta import APIMeta
-from ...shared.api_links import APILinks
-
-__all__ = ["AnthropicListAgentsResponse"]
-
-
-class AnthropicListAgentsResponse(BaseModel):
- agents: Optional[List["APIAgent"]] = None
-
- links: Optional[APILinks] = None
- """Links to other pages"""
-
- meta: Optional[APIMeta] = None
- """Meta information about the data set"""
-
-
-from ...api_agent import APIAgent
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_params.py b/src/do_gradientai/types/models/providers/anthropic_list_params.py
deleted file mode 100644
index ae1cca58..00000000
--- a/src/do_gradientai/types/models/providers/anthropic_list_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["AnthropicListParams"]
-
-
-class AnthropicListParams(TypedDict, total=False):
- page: int
- """Page number."""
-
- per_page: int
- """Items per page."""
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_response.py b/src/do_gradientai/types/models/providers/anthropic_list_response.py
deleted file mode 100644
index 24d6547a..00000000
--- a/src/do_gradientai/types/models/providers/anthropic_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from ...._models import BaseModel
-from ...shared.api_meta import APIMeta
-from ...shared.api_links import APILinks
-from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
-
-__all__ = ["AnthropicListResponse"]
-
-
-class AnthropicListResponse(BaseModel):
- api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None
- """Api key infos"""
-
- links: Optional[APILinks] = None
- """Links to other pages"""
-
- meta: Optional[APIMeta] = None
- """Meta information about the data set"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py b/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py
deleted file mode 100644
index 61324b7d..00000000
--- a/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
-
-__all__ = ["AnthropicRetrieveResponse"]
-
-
-class AnthropicRetrieveResponse(BaseModel):
- api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
- """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_update_params.py b/src/do_gradientai/types/models/providers/anthropic_update_params.py
deleted file mode 100644
index 865dc29c..00000000
--- a/src/do_gradientai/types/models/providers/anthropic_update_params.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Annotated, TypedDict
-
-from ...._utils import PropertyInfo
-
-__all__ = ["AnthropicUpdateParams"]
-
-
-class AnthropicUpdateParams(TypedDict, total=False):
- api_key: str
- """Anthropic API key"""
-
- body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
- """API key ID"""
-
- name: str
- """Name of the key"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_update_response.py b/src/do_gradientai/types/models/providers/anthropic_update_response.py
deleted file mode 100644
index 3a6daaea..00000000
--- a/src/do_gradientai/types/models/providers/anthropic_update_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
-
-__all__ = ["AnthropicUpdateResponse"]
-
-
-class AnthropicUpdateResponse(BaseModel):
- api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
- """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/openai_create_params.py b/src/do_gradientai/types/models/providers/openai_create_params.py
deleted file mode 100644
index 8ed7f571..00000000
--- a/src/do_gradientai/types/models/providers/openai_create_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["OpenAICreateParams"]
-
-
-class OpenAICreateParams(TypedDict, total=False):
- api_key: str
- """OpenAI API key"""
-
- name: str
- """Name of the key"""
diff --git a/src/do_gradientai/types/models/providers/openai_create_response.py b/src/do_gradientai/types/models/providers/openai_create_response.py
deleted file mode 100644
index b2e94766..00000000
--- a/src/do_gradientai/types/models/providers/openai_create_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
-
-__all__ = ["OpenAICreateResponse"]
-
-
-class OpenAICreateResponse(BaseModel):
- api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
- """OpenAI API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/openai_delete_response.py b/src/do_gradientai/types/models/providers/openai_delete_response.py
deleted file mode 100644
index e59c89fe..00000000
--- a/src/do_gradientai/types/models/providers/openai_delete_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
-
-__all__ = ["OpenAIDeleteResponse"]
-
-
-class OpenAIDeleteResponse(BaseModel):
- api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
- """OpenAI API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/openai_list_params.py b/src/do_gradientai/types/models/providers/openai_list_params.py
deleted file mode 100644
index 5677eeaf..00000000
--- a/src/do_gradientai/types/models/providers/openai_list_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["OpenAIListParams"]
-
-
-class OpenAIListParams(TypedDict, total=False):
- page: int
- """Page number."""
-
- per_page: int
- """Items per page."""
diff --git a/src/do_gradientai/types/models/providers/openai_list_response.py b/src/do_gradientai/types/models/providers/openai_list_response.py
deleted file mode 100644
index 698cd11e..00000000
--- a/src/do_gradientai/types/models/providers/openai_list_response.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from ...._models import BaseModel
-from ...shared.api_meta import APIMeta
-from ...shared.api_links import APILinks
-from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
-
-__all__ = ["OpenAIListResponse"]
-
-
-class OpenAIListResponse(BaseModel):
- api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None
- """Api key infos"""
-
- links: Optional[APILinks] = None
- """Links to other pages"""
-
- meta: Optional[APIMeta] = None
- """Meta information about the data set"""
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py
deleted file mode 100644
index 2db6d7a1..00000000
--- a/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["OpenAIRetrieveAgentsParams"]
-
-
-class OpenAIRetrieveAgentsParams(TypedDict, total=False):
- page: int
- """Page number."""
-
- per_page: int
- """Items per page."""
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py
deleted file mode 100644
index 717a56cd..00000000
--- a/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Optional
-
-from ...._models import BaseModel
-from ...shared.api_meta import APIMeta
-from ...shared.api_links import APILinks
-
-__all__ = ["OpenAIRetrieveAgentsResponse"]
-
-
-class OpenAIRetrieveAgentsResponse(BaseModel):
- agents: Optional[List["APIAgent"]] = None
-
- links: Optional[APILinks] = None
- """Links to other pages"""
-
- meta: Optional[APIMeta] = None
- """Meta information about the data set"""
-
-
-from ...api_agent import APIAgent
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_response.py b/src/do_gradientai/types/models/providers/openai_retrieve_response.py
deleted file mode 100644
index 0f382073..00000000
--- a/src/do_gradientai/types/models/providers/openai_retrieve_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
-
-__all__ = ["OpenAIRetrieveResponse"]
-
-
-class OpenAIRetrieveResponse(BaseModel):
- api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
- """OpenAI API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/openai_update_params.py b/src/do_gradientai/types/models/providers/openai_update_params.py
deleted file mode 100644
index 9b99495e..00000000
--- a/src/do_gradientai/types/models/providers/openai_update_params.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Annotated, TypedDict
-
-from ...._utils import PropertyInfo
-
-__all__ = ["OpenAIUpdateParams"]
-
-
-class OpenAIUpdateParams(TypedDict, total=False):
- api_key: str
- """OpenAI API key"""
-
- body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
- """API key ID"""
-
- name: str
- """Name of the key"""
diff --git a/src/do_gradientai/types/models/providers/openai_update_response.py b/src/do_gradientai/types/models/providers/openai_update_response.py
deleted file mode 100644
index ec7a1c94..00000000
--- a/src/do_gradientai/types/models/providers/openai_update_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
-
-__all__ = ["OpenAIUpdateResponse"]
-
-
-class OpenAIUpdateResponse(BaseModel):
- api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
- """OpenAI API Key Info"""
diff --git a/tests/api_resources/models/__init__.py b/tests/api_resources/models/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/models/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/models/providers/__init__.py b/tests/api_resources/models/providers/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/models/providers/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py
deleted file mode 100644
index 6b3d99a3..00000000
--- a/tests/api_resources/models/providers/test_anthropic.py
+++ /dev/null
@@ -1,557 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.models.providers import (
- AnthropicListResponse,
- AnthropicCreateResponse,
- AnthropicDeleteResponse,
- AnthropicUpdateResponse,
- AnthropicRetrieveResponse,
- AnthropicListAgentsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestAnthropic:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: GradientAI) -> None:
- anthropic = client.models.providers.anthropic.create()
- assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: GradientAI) -> None:
- anthropic = client.models.providers.anthropic.create(
- api_key='"sk-ant-12345678901234567890123456789012"',
- name='"Production Key"',
- )
- assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: GradientAI) -> None:
- response = client.models.providers.anthropic.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = response.parse()
- assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: GradientAI) -> None:
- with client.models.providers.anthropic.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = response.parse()
- assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: GradientAI) -> None:
- anthropic = client.models.providers.anthropic.retrieve(
- "api_key_uuid",
- )
- assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.models.providers.anthropic.with_raw_response.retrieve(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = response.parse()
- assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.models.providers.anthropic.with_streaming_response.retrieve(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = response.parse()
- assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.models.providers.anthropic.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: GradientAI) -> None:
- anthropic = client.models.providers.anthropic.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
- assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: GradientAI) -> None:
- anthropic = client.models.providers.anthropic.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- api_key='"sk-ant-12345678901234567890123456789012"',
- body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
- name='"Production Key"',
- )
- assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: GradientAI) -> None:
- response = client.models.providers.anthropic.with_raw_response.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = response.parse()
- assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: GradientAI) -> None:
- with client.models.providers.anthropic.with_streaming_response.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = response.parse()
- assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- client.models.providers.anthropic.with_raw_response.update(
- path_api_key_uuid="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: GradientAI) -> None:
- anthropic = client.models.providers.anthropic.list()
- assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: GradientAI) -> None:
- anthropic = client.models.providers.anthropic.list(
- page=0,
- per_page=0,
- )
- assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: GradientAI) -> None:
- response = client.models.providers.anthropic.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = response.parse()
- assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: GradientAI) -> None:
- with client.models.providers.anthropic.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = response.parse()
- assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: GradientAI) -> None:
- anthropic = client.models.providers.anthropic.delete(
- "api_key_uuid",
- )
- assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: GradientAI) -> None:
- response = client.models.providers.anthropic.with_raw_response.delete(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = response.parse()
- assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: GradientAI) -> None:
- with client.models.providers.anthropic.with_streaming_response.delete(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = response.parse()
- assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.models.providers.anthropic.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_agents(self, client: GradientAI) -> None:
- anthropic = client.models.providers.anthropic.list_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
- assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_agents_with_all_params(self, client: GradientAI) -> None:
- anthropic = client.models.providers.anthropic.list_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- page=0,
- per_page=0,
- )
- assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list_agents(self, client: GradientAI) -> None:
- response = client.models.providers.anthropic.with_raw_response.list_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = response.parse()
- assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list_agents(self, client: GradientAI) -> None:
- with client.models.providers.anthropic.with_streaming_response.list_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = response.parse()
- assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list_agents(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- client.models.providers.anthropic.with_raw_response.list_agents(
- uuid="",
- )
-
-
-class TestAsyncAnthropic:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncGradientAI) -> None:
- anthropic = await async_client.models.providers.anthropic.create()
- assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
- anthropic = await async_client.models.providers.anthropic.create(
- api_key='"sk-ant-12345678901234567890123456789012"',
- name='"Production Key"',
- )
- assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.anthropic.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = await response.parse()
- assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.anthropic.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = await response.parse()
- assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- anthropic = await async_client.models.providers.anthropic.retrieve(
- "api_key_uuid",
- )
- assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.anthropic.with_raw_response.retrieve(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = await response.parse()
- assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.anthropic.with_streaming_response.retrieve(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = await response.parse()
- assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.models.providers.anthropic.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncGradientAI) -> None:
- anthropic = await async_client.models.providers.anthropic.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
- assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
- anthropic = await async_client.models.providers.anthropic.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- api_key='"sk-ant-12345678901234567890123456789012"',
- body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
- name='"Production Key"',
- )
- assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.anthropic.with_raw_response.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = await response.parse()
- assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.anthropic.with_streaming_response.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = await response.parse()
- assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- await async_client.models.providers.anthropic.with_raw_response.update(
- path_api_key_uuid="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- anthropic = await async_client.models.providers.anthropic.list()
- assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
- anthropic = await async_client.models.providers.anthropic.list(
- page=0,
- per_page=0,
- )
- assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.anthropic.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = await response.parse()
- assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.anthropic.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = await response.parse()
- assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
- anthropic = await async_client.models.providers.anthropic.delete(
- "api_key_uuid",
- )
- assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.anthropic.with_raw_response.delete(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = await response.parse()
- assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.anthropic.with_streaming_response.delete(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = await response.parse()
- assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.models.providers.anthropic.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None:
- anthropic = await async_client.models.providers.anthropic.list_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
- assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
- anthropic = await async_client.models.providers.anthropic.list_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- page=0,
- per_page=0,
- )
- assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.anthropic.with_raw_response.list_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- anthropic = await response.parse()
- assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.anthropic.with_streaming_response.list_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- anthropic = await response.parse()
- assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- await async_client.models.providers.anthropic.with_raw_response.list_agents(
- uuid="",
- )
diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py
deleted file mode 100644
index bdde97ca..00000000
--- a/tests/api_resources/models/providers/test_openai.py
+++ /dev/null
@@ -1,557 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.models.providers import (
- OpenAIListResponse,
- OpenAICreateResponse,
- OpenAIDeleteResponse,
- OpenAIUpdateResponse,
- OpenAIRetrieveResponse,
- OpenAIRetrieveAgentsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestOpenAI:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: GradientAI) -> None:
- openai = client.models.providers.openai.create()
- assert_matches_type(OpenAICreateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: GradientAI) -> None:
- openai = client.models.providers.openai.create(
- api_key='"sk-proj--123456789098765432123456789"',
- name='"Production Key"',
- )
- assert_matches_type(OpenAICreateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: GradientAI) -> None:
- response = client.models.providers.openai.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = response.parse()
- assert_matches_type(OpenAICreateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: GradientAI) -> None:
- with client.models.providers.openai.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = response.parse()
- assert_matches_type(OpenAICreateResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: GradientAI) -> None:
- openai = client.models.providers.openai.retrieve(
- "api_key_uuid",
- )
- assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.models.providers.openai.with_raw_response.retrieve(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = response.parse()
- assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.models.providers.openai.with_streaming_response.retrieve(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = response.parse()
- assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.models.providers.openai.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: GradientAI) -> None:
- openai = client.models.providers.openai.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
- assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: GradientAI) -> None:
- openai = client.models.providers.openai.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- api_key='"sk-ant-12345678901234567890123456789012"',
- body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
- name='"Production Key"',
- )
- assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: GradientAI) -> None:
- response = client.models.providers.openai.with_raw_response.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = response.parse()
- assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: GradientAI) -> None:
- with client.models.providers.openai.with_streaming_response.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = response.parse()
- assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- client.models.providers.openai.with_raw_response.update(
- path_api_key_uuid="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: GradientAI) -> None:
- openai = client.models.providers.openai.list()
- assert_matches_type(OpenAIListResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: GradientAI) -> None:
- openai = client.models.providers.openai.list(
- page=0,
- per_page=0,
- )
- assert_matches_type(OpenAIListResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: GradientAI) -> None:
- response = client.models.providers.openai.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = response.parse()
- assert_matches_type(OpenAIListResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: GradientAI) -> None:
- with client.models.providers.openai.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = response.parse()
- assert_matches_type(OpenAIListResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: GradientAI) -> None:
- openai = client.models.providers.openai.delete(
- "api_key_uuid",
- )
- assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: GradientAI) -> None:
- response = client.models.providers.openai.with_raw_response.delete(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = response.parse()
- assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: GradientAI) -> None:
- with client.models.providers.openai.with_streaming_response.delete(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = response.parse()
- assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.models.providers.openai.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_agents(self, client: GradientAI) -> None:
- openai = client.models.providers.openai.retrieve_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
- assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None:
- openai = client.models.providers.openai.retrieve_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- page=0,
- per_page=0,
- )
- assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve_agents(self, client: GradientAI) -> None:
- response = client.models.providers.openai.with_raw_response.retrieve_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = response.parse()
- assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None:
- with client.models.providers.openai.with_streaming_response.retrieve_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = response.parse()
- assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve_agents(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- client.models.providers.openai.with_raw_response.retrieve_agents(
- uuid="",
- )
-
-
-class TestAsyncOpenAI:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncGradientAI) -> None:
- openai = await async_client.models.providers.openai.create()
- assert_matches_type(OpenAICreateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
- openai = await async_client.models.providers.openai.create(
- api_key='"sk-proj--123456789098765432123456789"',
- name='"Production Key"',
- )
- assert_matches_type(OpenAICreateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.openai.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = await response.parse()
- assert_matches_type(OpenAICreateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.openai.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = await response.parse()
- assert_matches_type(OpenAICreateResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- openai = await async_client.models.providers.openai.retrieve(
- "api_key_uuid",
- )
- assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.openai.with_raw_response.retrieve(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = await response.parse()
- assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.openai.with_streaming_response.retrieve(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = await response.parse()
- assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.models.providers.openai.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncGradientAI) -> None:
- openai = await async_client.models.providers.openai.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
- assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
- openai = await async_client.models.providers.openai.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- api_key='"sk-ant-12345678901234567890123456789012"',
- body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
- name='"Production Key"',
- )
- assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.openai.with_raw_response.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = await response.parse()
- assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.openai.with_streaming_response.update(
- path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = await response.parse()
- assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- await async_client.models.providers.openai.with_raw_response.update(
- path_api_key_uuid="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- openai = await async_client.models.providers.openai.list()
- assert_matches_type(OpenAIListResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
- openai = await async_client.models.providers.openai.list(
- page=0,
- per_page=0,
- )
- assert_matches_type(OpenAIListResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.openai.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = await response.parse()
- assert_matches_type(OpenAIListResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.openai.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = await response.parse()
- assert_matches_type(OpenAIListResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
- openai = await async_client.models.providers.openai.delete(
- "api_key_uuid",
- )
- assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.openai.with_raw_response.delete(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = await response.parse()
- assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.openai.with_streaming_response.delete(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = await response.parse()
- assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.models.providers.openai.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- openai = await async_client.models.providers.openai.retrieve_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
- assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
- openai = await async_client.models.providers.openai.retrieve_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- page=0,
- per_page=0,
- )
- assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.models.providers.openai.with_raw_response.retrieve_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- openai = await response.parse()
- assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- async with async_client.models.providers.openai.with_streaming_response.retrieve_agents(
- uuid='"123e4567-e89b-12d3-a456-426614174000"',
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- openai = await response.parse()
- assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- await async_client.models.providers.openai.with_raw_response.retrieve_agents(
- uuid="",
- )
From 4292abf5ba2e89dedf7f7660f6e274e42a163ae0 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 29 Jul 2025 19:06:18 +0000
Subject: [PATCH 04/10] feat(api): update via SDK Studio
---
.stats.yml | 4 +-
api.md | 76 ++
src/do_gradientai/_client.py | 296 +-------
src/do_gradientai/resources/__init__.py | 14 +
.../agents/evaluation_metrics/__init__.py | 28 +
.../evaluation_metrics/anthropic/__init__.py | 33 +
.../evaluation_metrics/anthropic/anthropic.py | 102 +++
.../evaluation_metrics/anthropic/keys.py | 711 ++++++++++++++++++
.../evaluation_metrics/evaluation_metrics.py | 64 ++
.../evaluation_metrics/openai/__init__.py | 33 +
.../agents/evaluation_metrics/openai/keys.py | 707 +++++++++++++++++
.../evaluation_metrics/openai/openai.py | 102 +++
.../resources/databases/__init__.py | 33 +
.../resources/databases/databases.py | 102 +++
.../databases/schema_registry/__init__.py | 33 +
.../databases/schema_registry/config.py | 506 +++++++++++++
.../schema_registry/schema_registry.py | 102 +++
src/do_gradientai/types/__init__.py | 8 +
.../evaluation_metrics/anthropic/__init__.py | 14 +
.../anthropic/key_create_params.py | 15 +
.../anthropic/key_create_response.py | 13 +
.../anthropic/key_delete_response.py | 13 +
.../anthropic/key_list_agents_params.py | 15 +
.../anthropic/key_list_agents_response.py | 24 +
.../anthropic/key_list_params.py | 15 +
.../anthropic/key_list_response.py | 21 +
.../anthropic/key_retrieve_response.py | 13 +
.../anthropic/key_update_params.py | 20 +
.../anthropic/key_update_response.py | 13 +
.../evaluation_metrics/openai/__init__.py | 14 +
.../openai/key_create_params.py | 15 +
.../openai/key_create_response.py | 13 +
.../openai/key_delete_response.py | 13 +
.../openai/key_list_agents_params.py | 15 +
.../openai/key_list_agents_response.py | 24 +
.../openai/key_list_params.py | 15 +
.../openai/key_list_response.py | 21 +
.../openai/key_retrieve_response.py | 13 +
.../openai/key_update_params.py | 20 +
.../openai/key_update_response.py | 13 +
src/do_gradientai/types/databases/__init__.py | 3 +
.../databases/schema_registry/__init__.py | 10 +
.../config_retrieve_response.py | 14 +
.../config_retrieve_subject_response.py | 17 +
.../schema_registry/config_update_params.py | 14 +
.../schema_registry/config_update_response.py | 14 +
.../config_update_subject_params.py | 16 +
.../config_update_subject_response.py | 17 +
.../evaluation_metrics/anthropic/__init__.py | 1 +
.../evaluation_metrics/anthropic/test_keys.py | 557 ++++++++++++++
.../evaluation_metrics/openai/__init__.py | 1 +
.../evaluation_metrics/openai/test_keys.py | 557 ++++++++++++++
tests/api_resources/databases/__init__.py | 1 +
.../databases/schema_registry/__init__.py | 1 +
.../databases/schema_registry/test_config.py | 423 +++++++++++
55 files changed, 4677 insertions(+), 275 deletions(-)
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/anthropic/__init__.py
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/openai/__init__.py
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py
create mode 100644 src/do_gradientai/resources/databases/__init__.py
create mode 100644 src/do_gradientai/resources/databases/databases.py
create mode 100644 src/do_gradientai/resources/databases/schema_registry/__init__.py
create mode 100644 src/do_gradientai/resources/databases/schema_registry/config.py
create mode 100644 src/do_gradientai/resources/databases/schema_registry/schema_registry.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/__init__.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_delete_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/__init__.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_delete_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_retrieve_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_response.py
create mode 100644 src/do_gradientai/types/databases/__init__.py
create mode 100644 src/do_gradientai/types/databases/schema_registry/__init__.py
create mode 100644 src/do_gradientai/types/databases/schema_registry/config_retrieve_response.py
create mode 100644 src/do_gradientai/types/databases/schema_registry/config_retrieve_subject_response.py
create mode 100644 src/do_gradientai/types/databases/schema_registry/config_update_params.py
create mode 100644 src/do_gradientai/types/databases/schema_registry/config_update_response.py
create mode 100644 src/do_gradientai/types/databases/schema_registry/config_update_subject_params.py
create mode 100644 src/do_gradientai/types/databases/schema_registry/config_update_subject_response.py
create mode 100644 tests/api_resources/agents/evaluation_metrics/anthropic/__init__.py
create mode 100644 tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py
create mode 100644 tests/api_resources/agents/evaluation_metrics/openai/__init__.py
create mode 100644 tests/api_resources/agents/evaluation_metrics/openai/test_keys.py
create mode 100644 tests/api_resources/databases/__init__.py
create mode 100644 tests/api_resources/databases/schema_registry/__init__.py
create mode 100644 tests/api_resources/databases/schema_registry/test_config.py
diff --git a/.stats.yml b/.stats.yml
index 1b2683e0..d1c0ec36 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 154
+configured_endpoints: 170
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml
openapi_spec_hash: d7d811c13cc79f15d82fe680cf425859
-config_hash: 8833ecca3133e27ffb01c9c12013e938
+config_hash: debcb81a744e9f69195a635bb70d45eb
diff --git a/api.md b/api.md
index 9e2ad010..297f0021 100644
--- a/api.md
+++ b/api.md
@@ -168,6 +168,58 @@ Methods:
- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse
+### Anthropic
+
+#### Keys
+
+Types:
+
+```python
+from do_gradientai.types.agents.evaluation_metrics.anthropic import (
+ KeyCreateResponse,
+ KeyRetrieveResponse,
+ KeyUpdateResponse,
+ KeyListResponse,
+ KeyDeleteResponse,
+ KeyListAgentsResponse,
+)
+```
+
+Methods:
+
+- client.agents.evaluation_metrics.anthropic.keys.create(\*\*params) -> KeyCreateResponse
+- client.agents.evaluation_metrics.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.agents.evaluation_metrics.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.agents.evaluation_metrics.anthropic.keys.list(\*\*params) -> KeyListResponse
+- client.agents.evaluation_metrics.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.agents.evaluation_metrics.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+
+### OpenAI
+
+#### Keys
+
+Types:
+
+```python
+from do_gradientai.types.agents.evaluation_metrics.openai import (
+ KeyCreateResponse,
+ KeyRetrieveResponse,
+ KeyUpdateResponse,
+ KeyListResponse,
+ KeyDeleteResponse,
+ KeyListAgentsResponse,
+)
+```
+
+Methods:
+
+- client.agents.evaluation_metrics.openai.keys.create(\*\*params) -> KeyCreateResponse
+- client.agents.evaluation_metrics.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.agents.evaluation_metrics.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.agents.evaluation_metrics.openai.keys.list(\*\*params) -> KeyListResponse
+- client.agents.evaluation_metrics.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.agents.evaluation_metrics.openai.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+
## EvaluationRuns
Types:
@@ -806,3 +858,27 @@ from do_gradientai.types import RegionListResponse
Methods:
- client.regions.list(\*\*params) -> RegionListResponse
+
+# Databases
+
+## SchemaRegistry
+
+### Config
+
+Types:
+
+```python
+from do_gradientai.types.databases.schema_registry import (
+ ConfigRetrieveResponse,
+ ConfigUpdateResponse,
+ ConfigRetrieveSubjectResponse,
+ ConfigUpdateSubjectResponse,
+)
+```
+
+Methods:
+
+- client.databases.schema_registry.config.retrieve(database_cluster_uuid) -> ConfigRetrieveResponse
+- client.databases.schema_registry.config.update(database_cluster_uuid, \*\*params) -> ConfigUpdateResponse
+- client.databases.schema_registry.config.retrieve_subject(subject_name, \*, database_cluster_uuid) -> ConfigRetrieveSubjectResponse
+- client.databases.schema_registry.config.update_subject(subject_name, \*, database_cluster_uuid, \*\*params) -> ConfigUpdateSubjectResponse
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index bd3b9908..31b4ee75 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -32,7 +32,7 @@
)
if TYPE_CHECKING:
- from .resources import chat, agents, regions, inference, gpu_droplets, knowledge_bases
+ from .resources import chat, agents, regions, databases, inference, gpu_droplets, knowledge_bases
from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.gpu_droplets import (
@@ -48,6 +48,7 @@
load_balancers,
)
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
+ from .resources.databases.databases import DatabasesResource, AsyncDatabasesResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
from .resources.gpu_droplets.snapshots import (
SnapshotsResource,
@@ -216,6 +217,12 @@ def regions(self) -> RegionsResource:
return RegionsResource(self)
+ @cached_property
+ def databases(self) -> DatabasesResource:
+ from .resources.databases import DatabasesResource
+
+ return DatabasesResource(self)
+
@cached_property
def firewalls(self) -> FirewallsResource:
from .resources.gpu_droplets.firewalls import FirewallsResource
@@ -521,52 +528,10 @@ def regions(self) -> AsyncRegionsResource:
return AsyncRegionsResource(self)
@cached_property
- def firewalls(self) -> AsyncFirewallsResource:
- from .resources.gpu_droplets.firewalls import AsyncFirewallsResource
-
- return AsyncFirewallsResource(self)
-
- @cached_property
- def floating_ips(self) -> AsyncFloatingIPsResource:
- from .resources.gpu_droplets.floating_ips import AsyncFloatingIPsResource
-
- return AsyncFloatingIPsResource(self)
-
- @cached_property
- def images(self) -> AsyncImagesResource:
- from .resources.gpu_droplets.images import AsyncImagesResource
-
- return AsyncImagesResource(self)
-
- @cached_property
- def load_balancers(self) -> AsyncLoadBalancersResource:
- from .resources.gpu_droplets.load_balancers import AsyncLoadBalancersResource
-
- return AsyncLoadBalancersResource(self)
-
- @cached_property
- def sizes(self) -> AsyncSizesResource:
- from .resources.gpu_droplets.sizes import AsyncSizesResource
-
- return AsyncSizesResource(self)
-
- @cached_property
- def snapshots(self) -> AsyncSnapshotsResource:
- from .resources.gpu_droplets.snapshots import AsyncSnapshotsResource
+ def databases(self) -> AsyncDatabasesResource:
+ from .resources.databases import AsyncDatabasesResource
- return AsyncSnapshotsResource(self)
-
- @cached_property
- def volumes(self) -> AsyncVolumesResource:
- from .resources.gpu_droplets.volumes import AsyncVolumesResource
-
- return AsyncVolumesResource(self)
-
- @cached_property
- def account(self) -> AsyncAccountResource:
- from .resources.gpu_droplets.account import AsyncAccountResource
-
- return AsyncAccountResource(self)
+ return AsyncDatabasesResource(self)
@cached_property
def with_raw_response(self) -> AsyncGradientAIWithRawResponse:
@@ -745,57 +710,10 @@ def regions(self) -> regions.RegionsResourceWithRawResponse:
return RegionsResourceWithRawResponse(self._client.regions)
@cached_property
- def firewalls(self) -> firewalls.FirewallsResourceWithRawResponse:
- from .resources.gpu_droplets.firewalls import FirewallsResourceWithRawResponse
-
- return FirewallsResourceWithRawResponse(self._client.firewalls)
-
- @cached_property
- def floating_ips(self) -> floating_ips.FloatingIPsResourceWithRawResponse:
- from .resources.gpu_droplets.floating_ips import (
- FloatingIPsResourceWithRawResponse,
- )
-
- return FloatingIPsResourceWithRawResponse(self._client.floating_ips)
-
- @cached_property
- def images(self) -> images.ImagesResourceWithRawResponse:
- from .resources.gpu_droplets.images import ImagesResourceWithRawResponse
-
- return ImagesResourceWithRawResponse(self._client.images)
-
- @cached_property
- def load_balancers(self) -> load_balancers.LoadBalancersResourceWithRawResponse:
- from .resources.gpu_droplets.load_balancers import (
- LoadBalancersResourceWithRawResponse,
- )
-
- return LoadBalancersResourceWithRawResponse(self._client.load_balancers)
-
- @cached_property
- def sizes(self) -> sizes.SizesResourceWithRawResponse:
- from .resources.gpu_droplets.sizes import SizesResourceWithRawResponse
-
- return SizesResourceWithRawResponse(self._client.sizes)
-
- @cached_property
- def snapshots(self) -> snapshots.SnapshotsResourceWithRawResponse:
- from .resources.gpu_droplets.snapshots import SnapshotsResourceWithRawResponse
-
- return SnapshotsResourceWithRawResponse(self._client.snapshots)
-
- @cached_property
- def volumes(self) -> volumes.VolumesResourceWithRawResponse:
- from .resources.gpu_droplets.volumes import VolumesResourceWithRawResponse
-
- return VolumesResourceWithRawResponse(self._client.volumes)
-
- @cached_property
- def account(self) -> account.AccountResourceWithRawResponse:
- from .resources.gpu_droplets.account import AccountResourceWithRawResponse
-
- return AccountResourceWithRawResponse(self._client.account)
+ def databases(self) -> databases.DatabasesResourceWithRawResponse:
+ from .resources.databases import DatabasesResourceWithRawResponse
+ return DatabasesResourceWithRawResponse(self._client.databases)
class AsyncGradientAIWithRawResponse:
_client: AsyncGradientAI
@@ -840,63 +758,10 @@ def regions(self) -> regions.AsyncRegionsResourceWithRawResponse:
return AsyncRegionsResourceWithRawResponse(self._client.regions)
@cached_property
- def firewalls(self) -> firewalls.AsyncFirewallsResourceWithRawResponse:
- from .resources.gpu_droplets.firewalls import (
- AsyncFirewallsResourceWithRawResponse,
- )
-
- return AsyncFirewallsResourceWithRawResponse(self._client.firewalls)
-
- @cached_property
- def floating_ips(self) -> floating_ips.AsyncFloatingIPsResourceWithRawResponse:
- from .resources.gpu_droplets.floating_ips import (
- AsyncFloatingIPsResourceWithRawResponse,
- )
-
- return AsyncFloatingIPsResourceWithRawResponse(self._client.floating_ips)
-
- @cached_property
- def images(self) -> images.AsyncImagesResourceWithRawResponse:
- from .resources.gpu_droplets.images import AsyncImagesResourceWithRawResponse
-
- return AsyncImagesResourceWithRawResponse(self._client.images)
-
- @cached_property
- def load_balancers(
- self,
- ) -> load_balancers.AsyncLoadBalancersResourceWithRawResponse:
- from .resources.gpu_droplets.load_balancers import (
- AsyncLoadBalancersResourceWithRawResponse,
- )
-
- return AsyncLoadBalancersResourceWithRawResponse(self._client.load_balancers)
-
- @cached_property
- def sizes(self) -> sizes.AsyncSizesResourceWithRawResponse:
- from .resources.gpu_droplets.sizes import AsyncSizesResourceWithRawResponse
-
- return AsyncSizesResourceWithRawResponse(self._client.sizes)
-
- @cached_property
- def snapshots(self) -> snapshots.AsyncSnapshotsResourceWithRawResponse:
- from .resources.gpu_droplets.snapshots import (
- AsyncSnapshotsResourceWithRawResponse,
- )
-
- return AsyncSnapshotsResourceWithRawResponse(self._client.snapshots)
-
- @cached_property
- def volumes(self) -> volumes.AsyncVolumesResourceWithRawResponse:
- from .resources.gpu_droplets.volumes import AsyncVolumesResourceWithRawResponse
-
- return AsyncVolumesResourceWithRawResponse(self._client.volumes)
-
- @cached_property
- def account(self) -> account.AsyncAccountResourceWithRawResponse:
- from .resources.gpu_droplets.account import AsyncAccountResourceWithRawResponse
-
- return AsyncAccountResourceWithRawResponse(self._client.account)
+ def databases(self) -> databases.AsyncDatabasesResourceWithRawResponse:
+ from .resources.databases import AsyncDatabasesResourceWithRawResponse
+ return AsyncDatabasesResourceWithRawResponse(self._client.databases)
class GradientAIWithStreamedResponse:
_client: GradientAI
@@ -941,63 +806,10 @@ def regions(self) -> regions.RegionsResourceWithStreamingResponse:
return RegionsResourceWithStreamingResponse(self._client.regions)
@cached_property
- def firewalls(self) -> firewalls.FirewallsResourceWithStreamingResponse:
- from .resources.gpu_droplets.firewalls import (
- FirewallsResourceWithStreamingResponse,
- )
-
- return FirewallsResourceWithStreamingResponse(self._client.firewalls)
-
- @cached_property
- def floating_ips(self) -> floating_ips.FloatingIPsResourceWithStreamingResponse:
- from .resources.gpu_droplets.floating_ips import (
- FloatingIPsResourceWithStreamingResponse,
- )
-
- return FloatingIPsResourceWithStreamingResponse(self._client.floating_ips)
-
- @cached_property
- def images(self) -> images.ImagesResourceWithStreamingResponse:
- from .resources.gpu_droplets.images import ImagesResourceWithStreamingResponse
-
- return ImagesResourceWithStreamingResponse(self._client.images)
-
- @cached_property
- def load_balancers(
- self,
- ) -> load_balancers.LoadBalancersResourceWithStreamingResponse:
- from .resources.gpu_droplets.load_balancers import (
- LoadBalancersResourceWithStreamingResponse,
- )
-
- return LoadBalancersResourceWithStreamingResponse(self._client.load_balancers)
-
- @cached_property
- def sizes(self) -> sizes.SizesResourceWithStreamingResponse:
- from .resources.gpu_droplets.sizes import SizesResourceWithStreamingResponse
-
- return SizesResourceWithStreamingResponse(self._client.sizes)
-
- @cached_property
- def snapshots(self) -> snapshots.SnapshotsResourceWithStreamingResponse:
- from .resources.gpu_droplets.snapshots import (
- SnapshotsResourceWithStreamingResponse,
- )
-
- return SnapshotsResourceWithStreamingResponse(self._client.snapshots)
-
- @cached_property
- def volumes(self) -> volumes.VolumesResourceWithStreamingResponse:
- from .resources.gpu_droplets.volumes import VolumesResourceWithStreamingResponse
-
- return VolumesResourceWithStreamingResponse(self._client.volumes)
-
- @cached_property
- def account(self) -> account.AccountResourceWithStreamingResponse:
- from .resources.gpu_droplets.account import AccountResourceWithStreamingResponse
-
- return AccountResourceWithStreamingResponse(self._client.account)
+ def databases(self) -> databases.DatabasesResourceWithStreamingResponse:
+ from .resources.databases import DatabasesResourceWithStreamingResponse
+ return DatabasesResourceWithStreamingResponse(self._client.databases)
class AsyncGradientAIWithStreamedResponse:
_client: AsyncGradientAI
@@ -1042,72 +854,10 @@ def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse:
return AsyncRegionsResourceWithStreamingResponse(self._client.regions)
@cached_property
- def firewalls(self) -> firewalls.AsyncFirewallsResourceWithStreamingResponse:
- from .resources.gpu_droplets.firewalls import (
- AsyncFirewallsResourceWithStreamingResponse,
- )
-
- return AsyncFirewallsResourceWithStreamingResponse(self._client.firewalls)
-
- @cached_property
- def floating_ips(
- self,
- ) -> floating_ips.AsyncFloatingIPsResourceWithStreamingResponse:
- from .resources.gpu_droplets.floating_ips import (
- AsyncFloatingIPsResourceWithStreamingResponse,
- )
-
- return AsyncFloatingIPsResourceWithStreamingResponse(self._client.floating_ips)
-
- @cached_property
- def images(self) -> images.AsyncImagesResourceWithStreamingResponse:
- from .resources.gpu_droplets.images import (
- AsyncImagesResourceWithStreamingResponse,
- )
-
- return AsyncImagesResourceWithStreamingResponse(self._client.images)
-
- @cached_property
- def load_balancers(
- self,
- ) -> load_balancers.AsyncLoadBalancersResourceWithStreamingResponse:
- from .resources.gpu_droplets.load_balancers import (
- AsyncLoadBalancersResourceWithStreamingResponse,
- )
-
- return AsyncLoadBalancersResourceWithStreamingResponse(self._client.load_balancers)
-
- @cached_property
- def sizes(self) -> sizes.AsyncSizesResourceWithStreamingResponse:
- from .resources.gpu_droplets.sizes import (
- AsyncSizesResourceWithStreamingResponse,
- )
-
- return AsyncSizesResourceWithStreamingResponse(self._client.sizes)
-
- @cached_property
- def snapshots(self) -> snapshots.AsyncSnapshotsResourceWithStreamingResponse:
- from .resources.gpu_droplets.snapshots import (
- AsyncSnapshotsResourceWithStreamingResponse,
- )
-
- return AsyncSnapshotsResourceWithStreamingResponse(self._client.snapshots)
-
- @cached_property
- def volumes(self) -> volumes.AsyncVolumesResourceWithStreamingResponse:
- from .resources.gpu_droplets.volumes import (
- AsyncVolumesResourceWithStreamingResponse,
- )
-
- return AsyncVolumesResourceWithStreamingResponse(self._client.volumes)
-
- @cached_property
- def account(self) -> account.AsyncAccountResourceWithStreamingResponse:
- from .resources.gpu_droplets.account import (
- AsyncAccountResourceWithStreamingResponse,
- )
+ def databases(self) -> databases.AsyncDatabasesResourceWithStreamingResponse:
+ from .resources.databases import AsyncDatabasesResourceWithStreamingResponse
- return AsyncAccountResourceWithStreamingResponse(self._client.account)
+ return AsyncDatabasesResourceWithStreamingResponse(self._client.databases)
Client = GradientAI
diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py
index 55df767c..9cc8a609 100644
--- a/src/do_gradientai/resources/__init__.py
+++ b/src/do_gradientai/resources/__init__.py
@@ -24,6 +24,14 @@
RegionsResourceWithStreamingResponse,
AsyncRegionsResourceWithStreamingResponse,
)
+from .databases import (
+ DatabasesResource,
+ AsyncDatabasesResource,
+ DatabasesResourceWithRawResponse,
+ AsyncDatabasesResourceWithRawResponse,
+ DatabasesResourceWithStreamingResponse,
+ AsyncDatabasesResourceWithStreamingResponse,
+)
from .inference import (
InferenceResource,
AsyncInferenceResource,
@@ -86,4 +94,10 @@
"AsyncRegionsResourceWithRawResponse",
"RegionsResourceWithStreamingResponse",
"AsyncRegionsResourceWithStreamingResponse",
+ "DatabasesResource",
+ "AsyncDatabasesResource",
+ "DatabasesResourceWithRawResponse",
+ "AsyncDatabasesResourceWithRawResponse",
+ "DatabasesResourceWithStreamingResponse",
+ "AsyncDatabasesResourceWithStreamingResponse",
]
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py
index ce687621..92449820 100644
--- a/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py
@@ -8,6 +8,22 @@
ModelsResourceWithStreamingResponse,
AsyncModelsResourceWithStreamingResponse,
)
+from .openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+from .anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
from .workspaces import (
WorkspacesResource,
AsyncWorkspacesResource,
@@ -38,6 +54,18 @@
"AsyncModelsResourceWithRawResponse",
"ModelsResourceWithStreamingResponse",
"AsyncModelsResourceWithStreamingResponse",
+ "AnthropicResource",
+ "AsyncAnthropicResource",
+ "AnthropicResourceWithRawResponse",
+ "AsyncAnthropicResourceWithRawResponse",
+ "AnthropicResourceWithStreamingResponse",
+ "AsyncAnthropicResourceWithStreamingResponse",
+ "OpenAIResource",
+ "AsyncOpenAIResource",
+ "OpenAIResourceWithRawResponse",
+ "AsyncOpenAIResourceWithRawResponse",
+ "OpenAIResourceWithStreamingResponse",
+ "AsyncOpenAIResourceWithStreamingResponse",
"EvaluationMetricsResource",
"AsyncEvaluationMetricsResource",
"EvaluationMetricsResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/__init__.py
new file mode 100644
index 00000000..057a3a2f
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from .anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "KeysResource",
+ "AsyncKeysResource",
+ "KeysResourceWithRawResponse",
+ "AsyncKeysResourceWithRawResponse",
+ "KeysResourceWithStreamingResponse",
+ "AsyncKeysResourceWithStreamingResponse",
+ "AnthropicResource",
+ "AsyncAnthropicResource",
+ "AnthropicResourceWithRawResponse",
+ "AsyncAnthropicResourceWithRawResponse",
+ "AnthropicResourceWithStreamingResponse",
+ "AsyncAnthropicResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py
new file mode 100644
index 00000000..1532f98e
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/anthropic.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["AnthropicResource", "AsyncAnthropicResource"]
+
+
+class AnthropicResource(SyncAPIResource):
+ @cached_property
+ def keys(self) -> KeysResource:
+ return KeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AnthropicResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AnthropicResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AnthropicResourceWithStreamingResponse(self)
+
+
+class AsyncAnthropicResource(AsyncAPIResource):
+ @cached_property
+ def keys(self) -> AsyncKeysResource:
+ return AsyncKeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAnthropicResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncAnthropicResourceWithStreamingResponse(self)
+
+
+class AnthropicResourceWithRawResponse:
+ def __init__(self, anthropic: AnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> KeysResourceWithRawResponse:
+ return KeysResourceWithRawResponse(self._anthropic.keys)
+
+
+class AsyncAnthropicResourceWithRawResponse:
+ def __init__(self, anthropic: AsyncAnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithRawResponse:
+ return AsyncKeysResourceWithRawResponse(self._anthropic.keys)
+
+
+class AnthropicResourceWithStreamingResponse:
+ def __init__(self, anthropic: AnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> KeysResourceWithStreamingResponse:
+ return KeysResourceWithStreamingResponse(self._anthropic.keys)
+
+
+class AsyncAnthropicResourceWithStreamingResponse:
+ def __init__(self, anthropic: AsyncAnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithStreamingResponse:
+ return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys)
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py
new file mode 100644
index 00000000..959e786b
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/anthropic/keys.py
@@ -0,0 +1,711 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.agents.evaluation_metrics.anthropic import (
+ key_list_params,
+ key_create_params,
+ key_update_params,
+ key_list_agents_params,
+)
+from .....types.agents.evaluation_metrics.anthropic.key_list_response import KeyListResponse
+from .....types.agents.evaluation_metrics.anthropic.key_create_response import KeyCreateResponse
+from .....types.agents.evaluation_metrics.anthropic.key_delete_response import KeyDeleteResponse
+from .....types.agents.evaluation_metrics.anthropic.key_update_response import KeyUpdateResponse
+from .....types.agents.evaluation_metrics.anthropic.key_retrieve_response import KeyRetrieveResponse
+from .....types.agents.evaluation_metrics.anthropic.key_list_agents_response import KeyListAgentsResponse
+
+__all__ = ["KeysResource", "AsyncKeysResource"]
+
+
+class KeysResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> KeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return KeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> KeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return KeysResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyCreateResponse:
+ """
+ To create an Anthropic API key, send a POST request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ api_key: Anthropic API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ key_create_params.KeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyRetrieveResponse:
+ """
+ To retrieve details of an Anthropic API key, send a GET request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ body_api_key_uuid: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyUpdateResponse:
+ """
+ To update an Anthropic API key, send a PUT request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: Anthropic API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ key_update_params.KeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyListResponse:
+ """
+ To list all Anthropic API keys, send a GET request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_params.KeyListParams,
+ ),
+ ),
+ cast_to=KeyListResponse,
+ )
+
+ def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyDeleteResponse:
+ """
+ To delete an Anthropic API key, send a DELETE request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyDeleteResponse,
+ )
+
+ def list_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyListAgentsResponse:
+ """
+ List Agents by Anthropic Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_agents_params.KeyListAgentsParams,
+ ),
+ ),
+ cast_to=KeyListAgentsResponse,
+ )
+
+
+class AsyncKeysResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncKeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncKeysResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyCreateResponse:
+ """
+ To create an Anthropic API key, send a POST request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ api_key: Anthropic API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ key_create_params.KeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyRetrieveResponse:
+ """
+ To retrieve details of an Anthropic API key, send a GET request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ body_api_key_uuid: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyUpdateResponse:
+ """
+ To update an Anthropic API key, send a PUT request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: Anthropic API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ key_update_params.KeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyListResponse:
+ """
+ To list all Anthropic API keys, send a GET request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_params.KeyListParams,
+ ),
+ ),
+ cast_to=KeyListResponse,
+ )
+
+ async def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyDeleteResponse:
+ """
+ To delete an Anthropic API key, send a DELETE request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyDeleteResponse,
+ )
+
+ async def list_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyListAgentsResponse:
+ """
+ List Agents by Anthropic Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_agents_params.KeyListAgentsParams,
+ ),
+ ),
+ cast_to=KeyListAgentsResponse,
+ )
+
+
+class KeysResourceWithRawResponse:
+ def __init__(self, keys: KeysResource) -> None:
+ self._keys = keys
+
+ self.create = to_raw_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ keys.update,
+ )
+ self.list = to_raw_response_wrapper(
+ keys.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = to_raw_response_wrapper(
+ keys.list_agents,
+ )
+
+
+class AsyncKeysResourceWithRawResponse:
+ def __init__(self, keys: AsyncKeysResource) -> None:
+ self._keys = keys
+
+ self.create = async_to_raw_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ keys.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ keys.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = async_to_raw_response_wrapper(
+ keys.list_agents,
+ )
+
+
+class KeysResourceWithStreamingResponse:
+ def __init__(self, keys: KeysResource) -> None:
+ self._keys = keys
+
+ self.create = to_streamed_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ keys.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ keys.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = to_streamed_response_wrapper(
+ keys.list_agents,
+ )
+
+
+class AsyncKeysResourceWithStreamingResponse:
+ def __init__(self, keys: AsyncKeysResource) -> None:
+ self._keys = keys
+
+ self.create = async_to_streamed_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ keys.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ keys.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = async_to_streamed_response_wrapper(
+ keys.list_agents,
+ )
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py
index edf708df..533a68bd 100644
--- a/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py
@@ -22,8 +22,24 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
+from .openai.openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
from ...._base_client import make_request_options
from ....types.agents import evaluation_metric_list_regions_params
+from .anthropic.anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
from .workspaces.workspaces import (
WorkspacesResource,
AsyncWorkspacesResource,
@@ -47,6 +63,14 @@ def workspaces(self) -> WorkspacesResource:
def models(self) -> ModelsResource:
return ModelsResource(self._client)
+ @cached_property
+ def anthropic(self) -> AnthropicResource:
+ return AnthropicResource(self._client)
+
+ @cached_property
+ def openai(self) -> OpenAIResource:
+ return OpenAIResource(self._client)
+
@cached_property
def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse:
"""
@@ -148,6 +172,14 @@ def workspaces(self) -> AsyncWorkspacesResource:
def models(self) -> AsyncModelsResource:
return AsyncModelsResource(self._client)
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResource:
+ return AsyncAnthropicResource(self._client)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResource:
+ return AsyncOpenAIResource(self._client)
+
@cached_property
def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse:
"""
@@ -259,6 +291,14 @@ def workspaces(self) -> WorkspacesResourceWithRawResponse:
def models(self) -> ModelsResourceWithRawResponse:
return ModelsResourceWithRawResponse(self._evaluation_metrics.models)
+ @cached_property
+ def anthropic(self) -> AnthropicResourceWithRawResponse:
+ return AnthropicResourceWithRawResponse(self._evaluation_metrics.anthropic)
+
+ @cached_property
+ def openai(self) -> OpenAIResourceWithRawResponse:
+ return OpenAIResourceWithRawResponse(self._evaluation_metrics.openai)
+
class AsyncEvaluationMetricsResourceWithRawResponse:
def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
@@ -279,6 +319,14 @@ def workspaces(self) -> AsyncWorkspacesResourceWithRawResponse:
def models(self) -> AsyncModelsResourceWithRawResponse:
return AsyncModelsResourceWithRawResponse(self._evaluation_metrics.models)
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResourceWithRawResponse:
+ return AsyncAnthropicResourceWithRawResponse(self._evaluation_metrics.anthropic)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResourceWithRawResponse:
+ return AsyncOpenAIResourceWithRawResponse(self._evaluation_metrics.openai)
+
class EvaluationMetricsResourceWithStreamingResponse:
def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
@@ -299,6 +347,14 @@ def workspaces(self) -> WorkspacesResourceWithStreamingResponse:
def models(self) -> ModelsResourceWithStreamingResponse:
return ModelsResourceWithStreamingResponse(self._evaluation_metrics.models)
+ @cached_property
+ def anthropic(self) -> AnthropicResourceWithStreamingResponse:
+ return AnthropicResourceWithStreamingResponse(self._evaluation_metrics.anthropic)
+
+ @cached_property
+ def openai(self) -> OpenAIResourceWithStreamingResponse:
+ return OpenAIResourceWithStreamingResponse(self._evaluation_metrics.openai)
+
class AsyncEvaluationMetricsResourceWithStreamingResponse:
def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
@@ -318,3 +374,11 @@ def workspaces(self) -> AsyncWorkspacesResourceWithStreamingResponse:
@cached_property
def models(self) -> AsyncModelsResourceWithStreamingResponse:
return AsyncModelsResourceWithStreamingResponse(self._evaluation_metrics.models)
+
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse:
+ return AsyncAnthropicResourceWithStreamingResponse(self._evaluation_metrics.anthropic)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResourceWithStreamingResponse:
+ return AsyncOpenAIResourceWithStreamingResponse(self._evaluation_metrics.openai)
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/openai/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/openai/__init__.py
new file mode 100644
index 00000000..66d8ca7a
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/openai/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from .openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "KeysResource",
+ "AsyncKeysResource",
+ "KeysResourceWithRawResponse",
+ "AsyncKeysResourceWithRawResponse",
+ "KeysResourceWithStreamingResponse",
+ "AsyncKeysResourceWithStreamingResponse",
+ "OpenAIResource",
+ "AsyncOpenAIResource",
+ "OpenAIResourceWithRawResponse",
+ "AsyncOpenAIResourceWithRawResponse",
+ "OpenAIResourceWithStreamingResponse",
+ "AsyncOpenAIResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py b/src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py
new file mode 100644
index 00000000..33a71ae1
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/openai/keys.py
@@ -0,0 +1,707 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.agents.evaluation_metrics.openai import (
+ key_list_params,
+ key_create_params,
+ key_update_params,
+ key_list_agents_params,
+)
+from .....types.agents.evaluation_metrics.openai.key_list_response import KeyListResponse
+from .....types.agents.evaluation_metrics.openai.key_create_response import KeyCreateResponse
+from .....types.agents.evaluation_metrics.openai.key_delete_response import KeyDeleteResponse
+from .....types.agents.evaluation_metrics.openai.key_update_response import KeyUpdateResponse
+from .....types.agents.evaluation_metrics.openai.key_retrieve_response import KeyRetrieveResponse
+from .....types.agents.evaluation_metrics.openai.key_list_agents_response import KeyListAgentsResponse
+
+__all__ = ["KeysResource", "AsyncKeysResource"]
+
+
+class KeysResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> KeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return KeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> KeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return KeysResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyCreateResponse:
+ """
+ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ api_key: OpenAI API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ key_create_params.KeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyRetrieveResponse:
+ """
+ To retrieve details of an OpenAI API key, send a GET request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ body_api_key_uuid: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyUpdateResponse:
+ """
+ To update an OpenAI API key, send a PUT request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: OpenAI API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ key_update_params.KeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyListResponse:
+ """
+ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_params.KeyListParams,
+ ),
+ ),
+ cast_to=KeyListResponse,
+ )
+
+ def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyDeleteResponse:
+ """
+ To delete an OpenAI API key, send a DELETE request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyDeleteResponse,
+ )
+
+ def list_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyListAgentsResponse:
+ """
+ List Agents by OpenAI Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/openai/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_agents_params.KeyListAgentsParams,
+ ),
+ ),
+ cast_to=KeyListAgentsResponse,
+ )
+
+
+class AsyncKeysResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncKeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncKeysResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyCreateResponse:
+ """
+ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ api_key: OpenAI API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ key_create_params.KeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyRetrieveResponse:
+ """
+ To retrieve details of an OpenAI API key, send a GET request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ body_api_key_uuid: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyUpdateResponse:
+ """
+ To update an OpenAI API key, send a PUT request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: OpenAI API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ key_update_params.KeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyListResponse:
+ """
+ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_params.KeyListParams,
+ ),
+ ),
+ cast_to=KeyListResponse,
+ )
+
+ async def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyDeleteResponse:
+ """
+ To delete an OpenAI API key, send a DELETE request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyDeleteResponse,
+ )
+
+ async def list_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyListAgentsResponse:
+ """
+ List Agents by OpenAI Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/openai/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_agents_params.KeyListAgentsParams,
+ ),
+ ),
+ cast_to=KeyListAgentsResponse,
+ )
+
+
+class KeysResourceWithRawResponse:
+ def __init__(self, keys: KeysResource) -> None:
+ self._keys = keys
+
+ self.create = to_raw_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ keys.update,
+ )
+ self.list = to_raw_response_wrapper(
+ keys.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = to_raw_response_wrapper(
+ keys.list_agents,
+ )
+
+
+class AsyncKeysResourceWithRawResponse:
+ def __init__(self, keys: AsyncKeysResource) -> None:
+ self._keys = keys
+
+ self.create = async_to_raw_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ keys.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ keys.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = async_to_raw_response_wrapper(
+ keys.list_agents,
+ )
+
+
+class KeysResourceWithStreamingResponse:
+ def __init__(self, keys: KeysResource) -> None:
+ self._keys = keys
+
+ self.create = to_streamed_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ keys.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ keys.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = to_streamed_response_wrapper(
+ keys.list_agents,
+ )
+
+
+class AsyncKeysResourceWithStreamingResponse:
+ def __init__(self, keys: AsyncKeysResource) -> None:
+ self._keys = keys
+
+ self.create = async_to_streamed_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ keys.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ keys.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ keys.delete,
+ )
+ self.list_agents = async_to_streamed_response_wrapper(
+ keys.list_agents,
+ )
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py b/src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py
new file mode 100644
index 00000000..d66dbbde
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/openai/openai.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["OpenAIResource", "AsyncOpenAIResource"]
+
+
+class OpenAIResource(SyncAPIResource):
+ @cached_property
+ def keys(self) -> KeysResource:
+ return KeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> OpenAIResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return OpenAIResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return OpenAIResourceWithStreamingResponse(self)
+
+
+class AsyncOpenAIResource(AsyncAPIResource):
+ @cached_property
+ def keys(self) -> AsyncKeysResource:
+ return AsyncKeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncOpenAIResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncOpenAIResourceWithStreamingResponse(self)
+
+
+class OpenAIResourceWithRawResponse:
+ def __init__(self, openai: OpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> KeysResourceWithRawResponse:
+ return KeysResourceWithRawResponse(self._openai.keys)
+
+
+class AsyncOpenAIResourceWithRawResponse:
+ def __init__(self, openai: AsyncOpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithRawResponse:
+ return AsyncKeysResourceWithRawResponse(self._openai.keys)
+
+
+class OpenAIResourceWithStreamingResponse:
+ def __init__(self, openai: OpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> KeysResourceWithStreamingResponse:
+ return KeysResourceWithStreamingResponse(self._openai.keys)
+
+
+class AsyncOpenAIResourceWithStreamingResponse:
+ def __init__(self, openai: AsyncOpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithStreamingResponse:
+ return AsyncKeysResourceWithStreamingResponse(self._openai.keys)
diff --git a/src/do_gradientai/resources/databases/__init__.py b/src/do_gradientai/resources/databases/__init__.py
new file mode 100644
index 00000000..40c62ed8
--- /dev/null
+++ b/src/do_gradientai/resources/databases/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .databases import (
+ DatabasesResource,
+ AsyncDatabasesResource,
+ DatabasesResourceWithRawResponse,
+ AsyncDatabasesResourceWithRawResponse,
+ DatabasesResourceWithStreamingResponse,
+ AsyncDatabasesResourceWithStreamingResponse,
+)
+from .schema_registry import (
+ SchemaRegistryResource,
+ AsyncSchemaRegistryResource,
+ SchemaRegistryResourceWithRawResponse,
+ AsyncSchemaRegistryResourceWithRawResponse,
+ SchemaRegistryResourceWithStreamingResponse,
+ AsyncSchemaRegistryResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "SchemaRegistryResource",
+ "AsyncSchemaRegistryResource",
+ "SchemaRegistryResourceWithRawResponse",
+ "AsyncSchemaRegistryResourceWithRawResponse",
+ "SchemaRegistryResourceWithStreamingResponse",
+ "AsyncSchemaRegistryResourceWithStreamingResponse",
+ "DatabasesResource",
+ "AsyncDatabasesResource",
+ "DatabasesResourceWithRawResponse",
+ "AsyncDatabasesResourceWithRawResponse",
+ "DatabasesResourceWithStreamingResponse",
+ "AsyncDatabasesResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/databases/databases.py b/src/do_gradientai/resources/databases/databases.py
new file mode 100644
index 00000000..e1f990d5
--- /dev/null
+++ b/src/do_gradientai/resources/databases/databases.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from .schema_registry.schema_registry import (
+ SchemaRegistryResource,
+ AsyncSchemaRegistryResource,
+ SchemaRegistryResourceWithRawResponse,
+ AsyncSchemaRegistryResourceWithRawResponse,
+ SchemaRegistryResourceWithStreamingResponse,
+ AsyncSchemaRegistryResourceWithStreamingResponse,
+)
+
+__all__ = ["DatabasesResource", "AsyncDatabasesResource"]
+
+
+class DatabasesResource(SyncAPIResource):
+ @cached_property
+ def schema_registry(self) -> SchemaRegistryResource:
+ return SchemaRegistryResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> DatabasesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return DatabasesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> DatabasesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return DatabasesResourceWithStreamingResponse(self)
+
+
+class AsyncDatabasesResource(AsyncAPIResource):
+ @cached_property
+ def schema_registry(self) -> AsyncSchemaRegistryResource:
+ return AsyncSchemaRegistryResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncDatabasesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncDatabasesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncDatabasesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncDatabasesResourceWithStreamingResponse(self)
+
+
+class DatabasesResourceWithRawResponse:
+ def __init__(self, databases: DatabasesResource) -> None:
+ self._databases = databases
+
+ @cached_property
+ def schema_registry(self) -> SchemaRegistryResourceWithRawResponse:
+ return SchemaRegistryResourceWithRawResponse(self._databases.schema_registry)
+
+
+class AsyncDatabasesResourceWithRawResponse:
+ def __init__(self, databases: AsyncDatabasesResource) -> None:
+ self._databases = databases
+
+ @cached_property
+ def schema_registry(self) -> AsyncSchemaRegistryResourceWithRawResponse:
+ return AsyncSchemaRegistryResourceWithRawResponse(self._databases.schema_registry)
+
+
+class DatabasesResourceWithStreamingResponse:
+ def __init__(self, databases: DatabasesResource) -> None:
+ self._databases = databases
+
+ @cached_property
+ def schema_registry(self) -> SchemaRegistryResourceWithStreamingResponse:
+ return SchemaRegistryResourceWithStreamingResponse(self._databases.schema_registry)
+
+
+class AsyncDatabasesResourceWithStreamingResponse:
+ def __init__(self, databases: AsyncDatabasesResource) -> None:
+ self._databases = databases
+
+ @cached_property
+ def schema_registry(self) -> AsyncSchemaRegistryResourceWithStreamingResponse:
+ return AsyncSchemaRegistryResourceWithStreamingResponse(self._databases.schema_registry)
diff --git a/src/do_gradientai/resources/databases/schema_registry/__init__.py b/src/do_gradientai/resources/databases/schema_registry/__init__.py
new file mode 100644
index 00000000..2015e4d4
--- /dev/null
+++ b/src/do_gradientai/resources/databases/schema_registry/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .config import (
+ ConfigResource,
+ AsyncConfigResource,
+ ConfigResourceWithRawResponse,
+ AsyncConfigResourceWithRawResponse,
+ ConfigResourceWithStreamingResponse,
+ AsyncConfigResourceWithStreamingResponse,
+)
+from .schema_registry import (
+ SchemaRegistryResource,
+ AsyncSchemaRegistryResource,
+ SchemaRegistryResourceWithRawResponse,
+ AsyncSchemaRegistryResourceWithRawResponse,
+ SchemaRegistryResourceWithStreamingResponse,
+ AsyncSchemaRegistryResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "ConfigResource",
+ "AsyncConfigResource",
+ "ConfigResourceWithRawResponse",
+ "AsyncConfigResourceWithRawResponse",
+ "ConfigResourceWithStreamingResponse",
+ "AsyncConfigResourceWithStreamingResponse",
+ "SchemaRegistryResource",
+ "AsyncSchemaRegistryResource",
+ "SchemaRegistryResourceWithRawResponse",
+ "AsyncSchemaRegistryResourceWithRawResponse",
+ "SchemaRegistryResourceWithStreamingResponse",
+ "AsyncSchemaRegistryResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/databases/schema_registry/config.py b/src/do_gradientai/resources/databases/schema_registry/config.py
new file mode 100644
index 00000000..a815b84e
--- /dev/null
+++ b/src/do_gradientai/resources/databases/schema_registry/config.py
@@ -0,0 +1,506 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.databases.schema_registry import config_update_params, config_update_subject_params
+from ....types.databases.schema_registry.config_update_response import ConfigUpdateResponse
+from ....types.databases.schema_registry.config_retrieve_response import ConfigRetrieveResponse
+from ....types.databases.schema_registry.config_update_subject_response import ConfigUpdateSubjectResponse
+from ....types.databases.schema_registry.config_retrieve_subject_response import ConfigRetrieveSubjectResponse
+
+__all__ = ["ConfigResource", "AsyncConfigResource"]
+
+
+class ConfigResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ConfigResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return ConfigResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ConfigResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return ConfigResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ database_cluster_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConfigRetrieveResponse:
+ """
+ To retrieve the Schema Registry configuration for a Kafka cluster, send a GET
+ request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is
+ a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ return self._get(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigRetrieveResponse,
+ )
+
+ def update(
+ self,
+ database_cluster_uuid: str,
+ *,
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConfigUpdateResponse:
+ """
+ To update the Schema Registry configuration for a Kafka cluster, send a PUT
+ request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is
+ a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ compatibility_level: The compatibility level of the schema registry.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ return self._put(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config",
+ body=maybe_transform({"compatibility_level": compatibility_level}, config_update_params.ConfigUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigUpdateResponse,
+ )
+
+ def retrieve_subject(
+ self,
+ subject_name: str,
+ *,
+ database_cluster_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConfigRetrieveSubjectResponse:
+ """
+ To retrieve the Schema Registry configuration for a Subject of a Kafka cluster,
+ send a GET request to
+ `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response
+ is a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ if not subject_name:
+ raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}")
+ return self._get(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigRetrieveSubjectResponse,
+ )
+
+ def update_subject(
+ self,
+ subject_name: str,
+ *,
+ database_cluster_uuid: str,
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConfigUpdateSubjectResponse:
+ """
+ To update the Schema Registry configuration for a Subject of a Kafka cluster,
+ send a PUT request to
+ `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response
+ is a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ compatibility_level: The compatibility level of the schema registry.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ if not subject_name:
+ raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}")
+ return self._put(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}",
+ body=maybe_transform(
+ {"compatibility_level": compatibility_level}, config_update_subject_params.ConfigUpdateSubjectParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigUpdateSubjectResponse,
+ )
+
+
+class AsyncConfigResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncConfigResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncConfigResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncConfigResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncConfigResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ database_cluster_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConfigRetrieveResponse:
+ """
+ To retrieve the Schema Registry configuration for a Kafka cluster, send a GET
+ request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is
+ a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ return await self._get(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ database_cluster_uuid: str,
+ *,
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConfigUpdateResponse:
+ """
+ To update the Schema Registry configuration for a Kafka cluster, send a PUT
+ request to `/v2/databases/$DATABASE_ID/schema-registry/config`. The response is
+ a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ compatibility_level: The compatibility level of the schema registry.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ return await self._put(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config",
+ body=await async_maybe_transform(
+ {"compatibility_level": compatibility_level}, config_update_params.ConfigUpdateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigUpdateResponse,
+ )
+
+ async def retrieve_subject(
+ self,
+ subject_name: str,
+ *,
+ database_cluster_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConfigRetrieveSubjectResponse:
+ """
+ To retrieve the Schema Registry configuration for a Subject of a Kafka cluster,
+ send a GET request to
+ `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response
+ is a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ if not subject_name:
+ raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}")
+ return await self._get(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigRetrieveSubjectResponse,
+ )
+
+ async def update_subject(
+ self,
+ subject_name: str,
+ *,
+ database_cluster_uuid: str,
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConfigUpdateSubjectResponse:
+ """
+ To update the Schema Registry configuration for a Subject of a Kafka cluster,
+ send a PUT request to
+ `/v2/databases/$DATABASE_ID/schema-registry/config/$SUBJECT_NAME`. The response
+ is a JSON object with a `compatibility_level` key, which is set to an object
+ containing any database configuration parameters.
+
+ Args:
+ compatibility_level: The compatibility level of the schema registry.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not database_cluster_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `database_cluster_uuid` but received {database_cluster_uuid!r}"
+ )
+ if not subject_name:
+ raise ValueError(f"Expected a non-empty value for `subject_name` but received {subject_name!r}")
+ return await self._put(
+ f"/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/databases/{database_cluster_uuid}/schema-registry/config/{subject_name}",
+ body=await async_maybe_transform(
+ {"compatibility_level": compatibility_level}, config_update_subject_params.ConfigUpdateSubjectParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConfigUpdateSubjectResponse,
+ )
+
+
+class ConfigResourceWithRawResponse:
+ def __init__(self, config: ConfigResource) -> None:
+ self._config = config
+
+ self.retrieve = to_raw_response_wrapper(
+ config.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ config.update,
+ )
+ self.retrieve_subject = to_raw_response_wrapper(
+ config.retrieve_subject,
+ )
+ self.update_subject = to_raw_response_wrapper(
+ config.update_subject,
+ )
+
+
+class AsyncConfigResourceWithRawResponse:
+ def __init__(self, config: AsyncConfigResource) -> None:
+ self._config = config
+
+ self.retrieve = async_to_raw_response_wrapper(
+ config.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ config.update,
+ )
+ self.retrieve_subject = async_to_raw_response_wrapper(
+ config.retrieve_subject,
+ )
+ self.update_subject = async_to_raw_response_wrapper(
+ config.update_subject,
+ )
+
+
+class ConfigResourceWithStreamingResponse:
+ def __init__(self, config: ConfigResource) -> None:
+ self._config = config
+
+ self.retrieve = to_streamed_response_wrapper(
+ config.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ config.update,
+ )
+ self.retrieve_subject = to_streamed_response_wrapper(
+ config.retrieve_subject,
+ )
+ self.update_subject = to_streamed_response_wrapper(
+ config.update_subject,
+ )
+
+
+class AsyncConfigResourceWithStreamingResponse:
+ def __init__(self, config: AsyncConfigResource) -> None:
+ self._config = config
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ config.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ config.update,
+ )
+ self.retrieve_subject = async_to_streamed_response_wrapper(
+ config.retrieve_subject,
+ )
+ self.update_subject = async_to_streamed_response_wrapper(
+ config.update_subject,
+ )
diff --git a/src/do_gradientai/resources/databases/schema_registry/schema_registry.py b/src/do_gradientai/resources/databases/schema_registry/schema_registry.py
new file mode 100644
index 00000000..6a0a44fb
--- /dev/null
+++ b/src/do_gradientai/resources/databases/schema_registry/schema_registry.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .config import (
+ ConfigResource,
+ AsyncConfigResource,
+ ConfigResourceWithRawResponse,
+ AsyncConfigResourceWithRawResponse,
+ ConfigResourceWithStreamingResponse,
+ AsyncConfigResourceWithStreamingResponse,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["SchemaRegistryResource", "AsyncSchemaRegistryResource"]
+
+
+class SchemaRegistryResource(SyncAPIResource):
+ @cached_property
+ def config(self) -> ConfigResource:
+ return ConfigResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> SchemaRegistryResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return SchemaRegistryResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> SchemaRegistryResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return SchemaRegistryResourceWithStreamingResponse(self)
+
+
+class AsyncSchemaRegistryResource(AsyncAPIResource):
+ @cached_property
+ def config(self) -> AsyncConfigResource:
+ return AsyncConfigResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncSchemaRegistryResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncSchemaRegistryResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncSchemaRegistryResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncSchemaRegistryResourceWithStreamingResponse(self)
+
+
+class SchemaRegistryResourceWithRawResponse:
+ def __init__(self, schema_registry: SchemaRegistryResource) -> None:
+ self._schema_registry = schema_registry
+
+ @cached_property
+ def config(self) -> ConfigResourceWithRawResponse:
+ return ConfigResourceWithRawResponse(self._schema_registry.config)
+
+
+class AsyncSchemaRegistryResourceWithRawResponse:
+ def __init__(self, schema_registry: AsyncSchemaRegistryResource) -> None:
+ self._schema_registry = schema_registry
+
+ @cached_property
+ def config(self) -> AsyncConfigResourceWithRawResponse:
+ return AsyncConfigResourceWithRawResponse(self._schema_registry.config)
+
+
+class SchemaRegistryResourceWithStreamingResponse:
+ def __init__(self, schema_registry: SchemaRegistryResource) -> None:
+ self._schema_registry = schema_registry
+
+ @cached_property
+ def config(self) -> ConfigResourceWithStreamingResponse:
+ return ConfigResourceWithStreamingResponse(self._schema_registry.config)
+
+
+class AsyncSchemaRegistryResourceWithStreamingResponse:
+ def __init__(self, schema_registry: AsyncSchemaRegistryResource) -> None:
+ self._schema_registry = schema_registry
+
+ @cached_property
+ def config(self) -> AsyncConfigResourceWithStreamingResponse:
+ return AsyncConfigResourceWithStreamingResponse(self._schema_registry.config)
diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
index 56e94b41..a7ed7329 100644
--- a/src/do_gradientai/types/__init__.py
+++ b/src/do_gradientai/types/__init__.py
@@ -169,6 +169,12 @@
agents.evaluation_metrics.workspaces.agent_move_response.AgentMoveResponse.model_rebuild(
_parent_namespace_depth=0
)
+ agents.evaluation_metrics.anthropic.key_list_agents_response.KeyListAgentsResponse.model_rebuild(
+ _parent_namespace_depth=0
+ )
+ agents.evaluation_metrics.openai.key_list_agents_response.KeyListAgentsResponse.model_rebuild(
+ _parent_namespace_depth=0
+ )
agents.function_create_response.FunctionCreateResponse.model_rebuild(
_parent_namespace_depth=0
)
@@ -201,6 +207,8 @@
agents.evaluation_metrics.workspace_list_response.WorkspaceListResponse.update_forward_refs() # type: ignore
agents.evaluation_metrics.workspaces.agent_list_response.AgentListResponse.update_forward_refs() # type: ignore
agents.evaluation_metrics.workspaces.agent_move_response.AgentMoveResponse.update_forward_refs() # type: ignore
+ agents.evaluation_metrics.anthropic.key_list_agents_response.KeyListAgentsResponse.update_forward_refs() # type: ignore
+ agents.evaluation_metrics.openai.key_list_agents_response.KeyListAgentsResponse.update_forward_refs() # type: ignore
agents.function_create_response.FunctionCreateResponse.update_forward_refs() # type: ignore
agents.function_update_response.FunctionUpdateResponse.update_forward_refs() # type: ignore
agents.function_delete_response.FunctionDeleteResponse.update_forward_refs() # type: ignore
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/__init__.py
new file mode 100644
index 00000000..eb47e709
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .key_list_params import KeyListParams as KeyListParams
+from .key_create_params import KeyCreateParams as KeyCreateParams
+from .key_list_response import KeyListResponse as KeyListResponse
+from .key_update_params import KeyUpdateParams as KeyUpdateParams
+from .key_create_response import KeyCreateResponse as KeyCreateResponse
+from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse
+from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
+from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
+from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams
+from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_params.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_params.py
new file mode 100644
index 00000000..55f44139
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyCreateParams"]
+
+
+class KeyCreateParams(TypedDict, total=False):
+ api_key: str
+ """Anthropic API key"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_response.py
new file mode 100644
index 00000000..24b7bbb2
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_create_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyCreateResponse"]
+
+
+class KeyCreateResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_delete_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_delete_response.py
new file mode 100644
index 00000000..b5d8584e
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_delete_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyDeleteResponse"]
+
+
+class KeyDeleteResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py
new file mode 100644
index 00000000..566c39f7
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListAgentsParams"]
+
+
+class KeyListAgentsParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
new file mode 100644
index 00000000..633211cc
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_agents_response.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ....._models import BaseModel
+from ....shared.api_meta import APIMeta
+from ....shared.api_links import APILinks
+
+__all__ = ["KeyListAgentsResponse"]
+
+
+class KeyListAgentsResponse(BaseModel):
+ agents: Optional[List["APIAgent"]] = None
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
+
+
+from ....api_agent import APIAgent
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_params.py
new file mode 100644
index 00000000..1611dc03
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListParams"]
+
+
+class KeyListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_response.py
new file mode 100644
index 00000000..edc9e75a
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_list_response.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ....._models import BaseModel
+from ....shared.api_meta import APIMeta
+from ....shared.api_links import APILinks
+from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyListResponse"]
+
+
+class KeyListResponse(BaseModel):
+ api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None
+ """Api key infos"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py
new file mode 100644
index 00000000..a100ec29
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_retrieve_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyRetrieveResponse"]
+
+
+class KeyRetrieveResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_params.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_params.py
new file mode 100644
index 00000000..0d542bbb
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ....._utils import PropertyInfo
+
+__all__ = ["KeyUpdateParams"]
+
+
+class KeyUpdateParams(TypedDict, total=False):
+ api_key: str
+ """Anthropic API key"""
+
+ body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+ """API key ID"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_response.py b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_response.py
new file mode 100644
index 00000000..06fa2d18
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/anthropic/key_update_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyUpdateResponse"]
+
+
+class KeyUpdateResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/__init__.py
new file mode 100644
index 00000000..eb47e709
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .key_list_params import KeyListParams as KeyListParams
+from .key_create_params import KeyCreateParams as KeyCreateParams
+from .key_list_response import KeyListResponse as KeyListResponse
+from .key_update_params import KeyUpdateParams as KeyUpdateParams
+from .key_create_response import KeyCreateResponse as KeyCreateResponse
+from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse
+from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
+from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
+from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams
+from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_params.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_params.py
new file mode 100644
index 00000000..5f4975dd
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyCreateParams"]
+
+
+class KeyCreateParams(TypedDict, total=False):
+ api_key: str
+ """OpenAI API key"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_response.py
new file mode 100644
index 00000000..4af7b872
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_create_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyCreateResponse"]
+
+
+class KeyCreateResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_delete_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_delete_response.py
new file mode 100644
index 00000000..f1ebc73a
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_delete_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyDeleteResponse"]
+
+
+class KeyDeleteResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_params.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_params.py
new file mode 100644
index 00000000..566c39f7
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListAgentsParams"]
+
+
+class KeyListAgentsParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_response.py
new file mode 100644
index 00000000..633211cc
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_agents_response.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ....._models import BaseModel
+from ....shared.api_meta import APIMeta
+from ....shared.api_links import APILinks
+
+__all__ = ["KeyListAgentsResponse"]
+
+
+class KeyListAgentsResponse(BaseModel):
+ agents: Optional[List["APIAgent"]] = None
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
+
+
+from ....api_agent import APIAgent
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_params.py
new file mode 100644
index 00000000..1611dc03
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListParams"]
+
+
+class KeyListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_response.py
new file mode 100644
index 00000000..00738f68
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_list_response.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ....._models import BaseModel
+from ....shared.api_meta import APIMeta
+from ....shared.api_links import APILinks
+from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyListResponse"]
+
+
+class KeyListResponse(BaseModel):
+ api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None
+ """Api key infos"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_retrieve_response.py
new file mode 100644
index 00000000..9ba42cd2
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_retrieve_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyRetrieveResponse"]
+
+
+class KeyRetrieveResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_params.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_params.py
new file mode 100644
index 00000000..3960cf36
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ....._utils import PropertyInfo
+
+__all__ = ["KeyUpdateParams"]
+
+
+class KeyUpdateParams(TypedDict, total=False):
+ api_key: str
+ """OpenAI API key"""
+
+ body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+ """API key ID"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_response.py b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_response.py
new file mode 100644
index 00000000..222a8416
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/openai/key_update_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ....._models import BaseModel
+from ....api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyUpdateResponse"]
+
+
+class KeyUpdateResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/do_gradientai/types/databases/__init__.py b/src/do_gradientai/types/databases/__init__.py
new file mode 100644
index 00000000..f8ee8b14
--- /dev/null
+++ b/src/do_gradientai/types/databases/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
diff --git a/src/do_gradientai/types/databases/schema_registry/__init__.py b/src/do_gradientai/types/databases/schema_registry/__init__.py
new file mode 100644
index 00000000..92c4e7a5
--- /dev/null
+++ b/src/do_gradientai/types/databases/schema_registry/__init__.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .config_update_params import ConfigUpdateParams as ConfigUpdateParams
+from .config_update_response import ConfigUpdateResponse as ConfigUpdateResponse
+from .config_retrieve_response import ConfigRetrieveResponse as ConfigRetrieveResponse
+from .config_update_subject_params import ConfigUpdateSubjectParams as ConfigUpdateSubjectParams
+from .config_update_subject_response import ConfigUpdateSubjectResponse as ConfigUpdateSubjectResponse
+from .config_retrieve_subject_response import ConfigRetrieveSubjectResponse as ConfigRetrieveSubjectResponse
diff --git a/src/do_gradientai/types/databases/schema_registry/config_retrieve_response.py b/src/do_gradientai/types/databases/schema_registry/config_retrieve_response.py
new file mode 100644
index 00000000..583e4eec
--- /dev/null
+++ b/src/do_gradientai/types/databases/schema_registry/config_retrieve_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConfigRetrieveResponse"]
+
+
+class ConfigRetrieveResponse(BaseModel):
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ]
+ """The compatibility level of the schema registry."""
diff --git a/src/do_gradientai/types/databases/schema_registry/config_retrieve_subject_response.py b/src/do_gradientai/types/databases/schema_registry/config_retrieve_subject_response.py
new file mode 100644
index 00000000..ec9fea68
--- /dev/null
+++ b/src/do_gradientai/types/databases/schema_registry/config_retrieve_subject_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConfigRetrieveSubjectResponse"]
+
+
+class ConfigRetrieveSubjectResponse(BaseModel):
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ]
+ """The compatibility level of the schema registry."""
+
+ subject_name: str
+ """The name of the schema subject."""
diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_params.py b/src/do_gradientai/types/databases/schema_registry/config_update_params.py
new file mode 100644
index 00000000..b25c7e92
--- /dev/null
+++ b/src/do_gradientai/types/databases/schema_registry/config_update_params.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ConfigUpdateParams"]
+
+
+class ConfigUpdateParams(TypedDict, total=False):
+ compatibility_level: Required[
+ Literal["NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"]
+ ]
+ """The compatibility level of the schema registry."""
diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_response.py b/src/do_gradientai/types/databases/schema_registry/config_update_response.py
new file mode 100644
index 00000000..0df776af
--- /dev/null
+++ b/src/do_gradientai/types/databases/schema_registry/config_update_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConfigUpdateResponse"]
+
+
+class ConfigUpdateResponse(BaseModel):
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ]
+ """The compatibility level of the schema registry."""
diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_subject_params.py b/src/do_gradientai/types/databases/schema_registry/config_update_subject_params.py
new file mode 100644
index 00000000..b935ba80
--- /dev/null
+++ b/src/do_gradientai/types/databases/schema_registry/config_update_subject_params.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ConfigUpdateSubjectParams"]
+
+
+class ConfigUpdateSubjectParams(TypedDict, total=False):
+ database_cluster_uuid: Required[str]
+
+ compatibility_level: Required[
+ Literal["NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"]
+ ]
+ """The compatibility level of the schema registry."""
diff --git a/src/do_gradientai/types/databases/schema_registry/config_update_subject_response.py b/src/do_gradientai/types/databases/schema_registry/config_update_subject_response.py
new file mode 100644
index 00000000..3bb3cd24
--- /dev/null
+++ b/src/do_gradientai/types/databases/schema_registry/config_update_subject_response.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["ConfigUpdateSubjectResponse"]
+
+
+class ConfigUpdateSubjectResponse(BaseModel):
+ compatibility_level: Literal[
+ "NONE", "BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE"
+ ]
+ """The compatibility level of the schema registry."""
+
+ subject_name: str
+ """The name of the schema subject."""
diff --git a/tests/api_resources/agents/evaluation_metrics/anthropic/__init__.py b/tests/api_resources/agents/evaluation_metrics/anthropic/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/anthropic/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py
new file mode 100644
index 00000000..aff153a6
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/anthropic/test_keys.py
@@ -0,0 +1,557 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.evaluation_metrics.anthropic import (
+ KeyListResponse,
+ KeyCreateResponse,
+ KeyDeleteResponse,
+ KeyUpdateResponse,
+ KeyRetrieveResponse,
+ KeyListAgentsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create_with_all_params(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.create(
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_create(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_create(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update_with_all_params(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_update(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_update(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_update(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_with_all_params(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_delete(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_delete(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_delete(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_delete(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_agents(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.anthropic.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list_agents(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list_agents(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_list_agents(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents(
+ uuid="",
+ )
+
+
+class TestAsyncKeys:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.create(
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.anthropic.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.anthropic.keys.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.anthropic.keys.with_raw_response.list_agents(
+ uuid="",
+ )
diff --git a/tests/api_resources/agents/evaluation_metrics/openai/__init__.py b/tests/api_resources/agents/evaluation_metrics/openai/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/openai/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py
new file mode 100644
index 00000000..08404acc
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/openai/test_keys.py
@@ -0,0 +1,557 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.evaluation_metrics.openai import (
+ KeyListResponse,
+ KeyCreateResponse,
+ KeyDeleteResponse,
+ KeyUpdateResponse,
+ KeyRetrieveResponse,
+ KeyListAgentsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create_with_all_params(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.create(
+ api_key='"sk-proj--123456789098765432123456789"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_create(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_create(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update_with_all_params(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            api_key='"sk-proj--123456789098765432123456789"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_update(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_update(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_update(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.openai.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_with_all_params(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_delete(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_delete(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_delete(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_delete(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.agents.evaluation_metrics.openai.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_agents(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None:
+ key = client.agents.evaluation_metrics.openai.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list_agents(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list_agents(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.openai.keys.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_list_agents(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents(
+ uuid="",
+ )
+
+
+class TestAsyncKeys:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.create(
+ api_key='"sk-proj--123456789098765432123456789"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+            api_key='"sk-proj--123456789098765432123456789"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.agents.evaluation_metrics.openai.keys.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.openai.keys.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.openai.keys.with_raw_response.list_agents(
+ uuid="",
+ )
diff --git a/tests/api_resources/databases/__init__.py b/tests/api_resources/databases/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/databases/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/databases/schema_registry/__init__.py b/tests/api_resources/databases/schema_registry/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/databases/schema_registry/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/databases/schema_registry/test_config.py b/tests/api_resources/databases/schema_registry/test_config.py
new file mode 100644
index 00000000..f63d62c0
--- /dev/null
+++ b/tests/api_resources/databases/schema_registry/test_config.py
@@ -0,0 +1,423 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.databases.schema_registry import (
+ ConfigUpdateResponse,
+ ConfigRetrieveResponse,
+ ConfigUpdateSubjectResponse,
+ ConfigRetrieveSubjectResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestConfig:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve(self, client: GradientAI) -> None:
+ config = client.databases.schema_registry.config.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve(self, client: GradientAI) -> None:
+ response = client.databases.schema_registry.config.with_raw_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = response.parse()
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+ with client.databases.schema_registry.config.with_streaming_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = response.parse()
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update(self, client: GradientAI) -> None:
+ config = client.databases.schema_registry.config.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_update(self, client: GradientAI) -> None:
+ response = client.databases.schema_registry.config.with_raw_response.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = response.parse()
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_update(self, client: GradientAI) -> None:
+ with client.databases.schema_registry.config.with_streaming_response.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = response.parse()
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_update(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.update(
+ database_cluster_uuid="",
+ compatibility_level="BACKWARD",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve_subject(self, client: GradientAI) -> None:
+ config = client.databases.schema_registry.config.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve_subject(self, client: GradientAI) -> None:
+ response = client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = response.parse()
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve_subject(self, client: GradientAI) -> None:
+ with client.databases.schema_registry.config.with_streaming_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = response.parse()
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve_subject(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update_subject(self, client: GradientAI) -> None:
+ config = client.databases.schema_registry.config.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_update_subject(self, client: GradientAI) -> None:
+ response = client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = response.parse()
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_update_subject(self, client: GradientAI) -> None:
+ with client.databases.schema_registry.config.with_streaming_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = response.parse()
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_update_subject(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="",
+ compatibility_level="BACKWARD",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"):
+ client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+
+
+class TestAsyncConfig:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+ config = await async_client.databases.schema_registry.config.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.databases.schema_registry.config.with_raw_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = await response.parse()
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.databases.schema_registry.config.with_streaming_response.retrieve(
+ "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = await response.parse()
+ assert_matches_type(ConfigRetrieveResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+ config = await async_client.databases.schema_registry.config.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.databases.schema_registry.config.with_raw_response.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = await response.parse()
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.databases.schema_registry.config.with_streaming_response.update(
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = await response.parse()
+ assert_matches_type(ConfigUpdateResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.update(
+ database_cluster_uuid="",
+ compatibility_level="BACKWARD",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve_subject(self, async_client: AsyncGradientAI) -> None:
+ config = await async_client.databases.schema_registry.config.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve_subject(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = await response.parse()
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve_subject(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.databases.schema_registry.config.with_streaming_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = await response.parse()
+ assert_matches_type(ConfigRetrieveSubjectResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve_subject(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.retrieve_subject(
+ subject_name="",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update_subject(self, async_client: AsyncGradientAI) -> None:
+ config = await async_client.databases.schema_registry.config.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_update_subject(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ config = await response.parse()
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_update_subject(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.databases.schema_registry.config.with_streaming_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ config = await response.parse()
+ assert_matches_type(ConfigUpdateSubjectResponse, config, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_update_subject(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `database_cluster_uuid` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="customer-schema",
+ database_cluster_uuid="",
+ compatibility_level="BACKWARD",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `subject_name` but received ''"):
+ await async_client.databases.schema_registry.config.with_raw_response.update_subject(
+ subject_name="",
+ database_cluster_uuid="9cc10173-e9ea-4176-9dbc-a4cee4c4ff30",
+ compatibility_level="BACKWARD",
+ )
From 3018b4cc758839eda46617170a24f181d9a0b70b Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 29 Jul 2025 19:19:07 +0000
Subject: [PATCH 05/10] feat(api): update via SDK Studio
---
.stats.yml | 2 +-
api.md | 62 ++
src/do_gradientai/_client.py | 39 +-
src/do_gradientai/resources/__init__.py | 14 +
.../resources/models/__init__.py | 33 +
src/do_gradientai/resources/models/models.py | 286 +++++++
.../resources/models/providers/__init__.py | 47 ++
.../resources/models/providers/anthropic.py | 711 ++++++++++++++++++
.../resources/models/providers/openai.py | 707 +++++++++++++++++
.../resources/models/providers/providers.py | 134 ++++
src/do_gradientai/types/__init__.py | 12 +
.../evaluation_metrics/model_list_response.py | 59 +-
src/do_gradientai/types/api_agent_model.py | 29 +-
src/do_gradientai/types/api_agreement.py | 17 +
src/do_gradientai/types/api_model.py | 42 ++
src/do_gradientai/types/api_model_version.py | 18 +
src/do_gradientai/types/model_list_params.py | 42 ++
.../types/model_list_response.py | 21 +
src/do_gradientai/types/models/__init__.py | 3 +
.../types/models/providers/__init__.py | 24 +
.../providers/anthropic_create_params.py | 15 +
.../providers/anthropic_create_response.py | 13 +
.../providers/anthropic_delete_response.py | 13 +
.../providers/anthropic_list_agents_params.py | 15 +
.../anthropic_list_agents_response.py | 24 +
.../models/providers/anthropic_list_params.py | 15 +
.../providers/anthropic_list_response.py | 21 +
.../providers/anthropic_retrieve_response.py | 13 +
.../providers/anthropic_update_params.py | 20 +
.../providers/anthropic_update_response.py | 13 +
.../models/providers/openai_create_params.py | 15 +
.../providers/openai_create_response.py | 13 +
.../providers/openai_delete_response.py | 13 +
.../models/providers/openai_list_params.py | 15 +
.../models/providers/openai_list_response.py | 21 +
.../openai_retrieve_agents_params.py | 15 +
.../openai_retrieve_agents_response.py | 24 +
.../providers/openai_retrieve_response.py | 13 +
.../models/providers/openai_update_params.py | 20 +
.../providers/openai_update_response.py | 13 +
tests/api_resources/models/__init__.py | 1 +
.../models/providers/__init__.py | 1 +
.../models/providers/test_anthropic.py | 557 ++++++++++++++
.../models/providers/test_openai.py | 557 ++++++++++++++
tests/api_resources/test_models.py | 102 +++
45 files changed, 3762 insertions(+), 82 deletions(-)
create mode 100644 src/do_gradientai/resources/models/__init__.py
create mode 100644 src/do_gradientai/resources/models/models.py
create mode 100644 src/do_gradientai/resources/models/providers/__init__.py
create mode 100644 src/do_gradientai/resources/models/providers/anthropic.py
create mode 100644 src/do_gradientai/resources/models/providers/openai.py
create mode 100644 src/do_gradientai/resources/models/providers/providers.py
create mode 100644 src/do_gradientai/types/api_agreement.py
create mode 100644 src/do_gradientai/types/api_model.py
create mode 100644 src/do_gradientai/types/api_model_version.py
create mode 100644 src/do_gradientai/types/model_list_params.py
create mode 100644 src/do_gradientai/types/model_list_response.py
create mode 100644 src/do_gradientai/types/models/__init__.py
create mode 100644 src/do_gradientai/types/models/providers/__init__.py
create mode 100644 src/do_gradientai/types/models/providers/anthropic_create_params.py
create mode 100644 src/do_gradientai/types/models/providers/anthropic_create_response.py
create mode 100644 src/do_gradientai/types/models/providers/anthropic_delete_response.py
create mode 100644 src/do_gradientai/types/models/providers/anthropic_list_agents_params.py
create mode 100644 src/do_gradientai/types/models/providers/anthropic_list_agents_response.py
create mode 100644 src/do_gradientai/types/models/providers/anthropic_list_params.py
create mode 100644 src/do_gradientai/types/models/providers/anthropic_list_response.py
create mode 100644 src/do_gradientai/types/models/providers/anthropic_retrieve_response.py
create mode 100644 src/do_gradientai/types/models/providers/anthropic_update_params.py
create mode 100644 src/do_gradientai/types/models/providers/anthropic_update_response.py
create mode 100644 src/do_gradientai/types/models/providers/openai_create_params.py
create mode 100644 src/do_gradientai/types/models/providers/openai_create_response.py
create mode 100644 src/do_gradientai/types/models/providers/openai_delete_response.py
create mode 100644 src/do_gradientai/types/models/providers/openai_list_params.py
create mode 100644 src/do_gradientai/types/models/providers/openai_list_response.py
create mode 100644 src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py
create mode 100644 src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py
create mode 100644 src/do_gradientai/types/models/providers/openai_retrieve_response.py
create mode 100644 src/do_gradientai/types/models/providers/openai_update_params.py
create mode 100644 src/do_gradientai/types/models/providers/openai_update_response.py
create mode 100644 tests/api_resources/models/__init__.py
create mode 100644 tests/api_resources/models/providers/__init__.py
create mode 100644 tests/api_resources/models/providers/test_anthropic.py
create mode 100644 tests/api_resources/models/providers/test_openai.py
create mode 100644 tests/api_resources/test_models.py
diff --git a/.stats.yml b/.stats.yml
index d1c0ec36..1a7efa56 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 170
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml
openapi_spec_hash: d7d811c13cc79f15d82fe680cf425859
-config_hash: debcb81a744e9f69195a635bb70d45eb
+config_hash: 9e90c31a8b4d524c6714fa969828ecc8
diff --git a/api.md b/api.md
index 297f0021..dc52233d 100644
--- a/api.md
+++ b/api.md
@@ -847,6 +847,68 @@ Methods:
- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
+# Models
+
+Types:
+
+```python
+from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse
+```
+
+Methods:
+
+- client.models.list(\*\*params) -> ModelListResponse
+
+## Providers
+
+### Anthropic
+
+Types:
+
+```python
+from do_gradientai.types.models.providers import (
+ AnthropicCreateResponse,
+ AnthropicRetrieveResponse,
+ AnthropicUpdateResponse,
+ AnthropicListResponse,
+ AnthropicDeleteResponse,
+ AnthropicListAgentsResponse,
+)
+```
+
+Methods:
+
+- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse
+- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse
+- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse
+- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse
+- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse
+- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse
+
+### OpenAI
+
+Types:
+
+```python
+from do_gradientai.types.models.providers import (
+ OpenAICreateResponse,
+ OpenAIRetrieveResponse,
+ OpenAIUpdateResponse,
+ OpenAIListResponse,
+ OpenAIDeleteResponse,
+ OpenAIRetrieveAgentsResponse,
+)
+```
+
+Methods:
+
+- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse
+- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse
+- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse
+- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse
+- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse
+- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse
+
# Regions
Types:
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index 31b4ee75..9bd9d0c7 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -32,7 +32,7 @@
)
if TYPE_CHECKING:
- from .resources import chat, agents, regions, databases, inference, gpu_droplets, knowledge_bases
+ from .resources import chat, agents, models, regions, databases, inference, gpu_droplets, knowledge_bases
from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.gpu_droplets import (
@@ -48,6 +48,7 @@
load_balancers,
)
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
+ from .resources.models.models import ModelsResource, AsyncModelsResource
from .resources.databases.databases import DatabasesResource, AsyncDatabasesResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
from .resources.gpu_droplets.snapshots import (
@@ -211,6 +212,12 @@ def knowledge_bases(self) -> KnowledgeBasesResource:
return KnowledgeBasesResource(self)
+ @cached_property
+ def models(self) -> ModelsResource:
+ from .resources.models import ModelsResource
+
+ return ModelsResource(self)
+
@cached_property
def regions(self) -> RegionsResource:
from .resources.regions import RegionsResource
@@ -521,6 +528,12 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResource:
return AsyncKnowledgeBasesResource(self)
+ @cached_property
+ def models(self) -> AsyncModelsResource:
+ from .resources.models import AsyncModelsResource
+
+ return AsyncModelsResource(self)
+
@cached_property
def regions(self) -> AsyncRegionsResource:
from .resources.regions import AsyncRegionsResource
@@ -703,6 +716,12 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawRespon
return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)
+ @cached_property
+ def models(self) -> models.ModelsResourceWithRawResponse:
+ from .resources.models import ModelsResourceWithRawResponse
+
+ return ModelsResourceWithRawResponse(self._client.models)
+
@cached_property
def regions(self) -> regions.RegionsResourceWithRawResponse:
from .resources.regions import RegionsResourceWithRawResponse
@@ -751,6 +770,12 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawR
return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)
+ @cached_property
+ def models(self) -> models.AsyncModelsResourceWithRawResponse:
+ from .resources.models import AsyncModelsResourceWithRawResponse
+
+ return AsyncModelsResourceWithRawResponse(self._client.models)
+
@cached_property
def regions(self) -> regions.AsyncRegionsResourceWithRawResponse:
from .resources.regions import AsyncRegionsResourceWithRawResponse
@@ -799,6 +824,12 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreaming
return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)
+ @cached_property
+ def models(self) -> models.ModelsResourceWithStreamingResponse:
+ from .resources.models import ModelsResourceWithStreamingResponse
+
+ return ModelsResourceWithStreamingResponse(self._client.models)
+
@cached_property
def regions(self) -> regions.RegionsResourceWithStreamingResponse:
from .resources.regions import RegionsResourceWithStreamingResponse
@@ -847,6 +878,12 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStre
return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)
+ @cached_property
+ def models(self) -> models.AsyncModelsResourceWithStreamingResponse:
+ from .resources.models import AsyncModelsResourceWithStreamingResponse
+
+ return AsyncModelsResourceWithStreamingResponse(self._client.models)
+
@cached_property
def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse:
from .resources.regions import AsyncRegionsResourceWithStreamingResponse
diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py
index 9cc8a609..d5198560 100644
--- a/src/do_gradientai/resources/__init__.py
+++ b/src/do_gradientai/resources/__init__.py
@@ -16,6 +16,14 @@
AgentsResourceWithStreamingResponse,
AsyncAgentsResourceWithStreamingResponse,
)
+from .models import (
+ ModelsResource,
+ AsyncModelsResource,
+ ModelsResourceWithRawResponse,
+ AsyncModelsResourceWithRawResponse,
+ ModelsResourceWithStreamingResponse,
+ AsyncModelsResourceWithStreamingResponse,
+)
from .regions import (
RegionsResource,
AsyncRegionsResource,
@@ -88,6 +96,12 @@
"AsyncKnowledgeBasesResourceWithRawResponse",
"KnowledgeBasesResourceWithStreamingResponse",
"AsyncKnowledgeBasesResourceWithStreamingResponse",
+ "ModelsResource",
+ "AsyncModelsResource",
+ "ModelsResourceWithRawResponse",
+ "AsyncModelsResourceWithRawResponse",
+ "ModelsResourceWithStreamingResponse",
+ "AsyncModelsResourceWithStreamingResponse",
"RegionsResource",
"AsyncRegionsResource",
"RegionsResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/models/__init__.py b/src/do_gradientai/resources/models/__init__.py
new file mode 100644
index 00000000..e30dd201
--- /dev/null
+++ b/src/do_gradientai/resources/models/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .models import (
+ ModelsResource,
+ AsyncModelsResource,
+ ModelsResourceWithRawResponse,
+ AsyncModelsResourceWithRawResponse,
+ ModelsResourceWithStreamingResponse,
+ AsyncModelsResourceWithStreamingResponse,
+)
+from .providers import (
+ ProvidersResource,
+ AsyncProvidersResource,
+ ProvidersResourceWithRawResponse,
+ AsyncProvidersResourceWithRawResponse,
+ ProvidersResourceWithStreamingResponse,
+ AsyncProvidersResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "ProvidersResource",
+ "AsyncProvidersResource",
+ "ProvidersResourceWithRawResponse",
+ "AsyncProvidersResourceWithRawResponse",
+ "ProvidersResourceWithStreamingResponse",
+ "AsyncProvidersResourceWithStreamingResponse",
+ "ModelsResource",
+ "AsyncModelsResource",
+ "ModelsResourceWithRawResponse",
+ "AsyncModelsResourceWithRawResponse",
+ "ModelsResourceWithStreamingResponse",
+ "AsyncModelsResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/models/models.py b/src/do_gradientai/resources/models/models.py
new file mode 100644
index 00000000..3800c03c
--- /dev/null
+++ b/src/do_gradientai/resources/models/models.py
@@ -0,0 +1,286 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal
+
+import httpx
+
+from ...types import model_list_params
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from .providers.providers import (
+ ProvidersResource,
+ AsyncProvidersResource,
+ ProvidersResourceWithRawResponse,
+ AsyncProvidersResourceWithRawResponse,
+ ProvidersResourceWithStreamingResponse,
+ AsyncProvidersResourceWithStreamingResponse,
+)
+from ...types.model_list_response import ModelListResponse
+
+__all__ = ["ModelsResource", "AsyncModelsResource"]
+
+
+class ModelsResource(SyncAPIResource):
+ @cached_property
+ def providers(self) -> ProvidersResource:
+ return ProvidersResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ModelsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return ModelsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return ModelsResourceWithStreamingResponse(self)
+
+ def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ public_only: bool | NotGiven = NOT_GIVEN,
+ usecases: List[
+ Literal[
+ "MODEL_USECASE_UNKNOWN",
+ "MODEL_USECASE_AGENT",
+ "MODEL_USECASE_FINETUNED",
+ "MODEL_USECASE_KNOWLEDGEBASE",
+ "MODEL_USECASE_GUARDRAIL",
+ "MODEL_USECASE_REASONING",
+ "MODEL_USECASE_SERVERLESS",
+ ]
+ ]
+ | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ModelListResponse:
+ """
+ To list all models, send a GET request to `/v2/gen-ai/models`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ public_only: Only include models that are publicly available.
+
+ usecases: Include only models defined for the listed usecases.
+
+ - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+          - MODEL_USECASE_AGENT: The model may be used in an agent
+          - MODEL_USECASE_FINETUNED: The model may be used for fine tuning
+          - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+            (embedding models)
+          - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+ - MODEL_USECASE_REASONING: The model usecase for reasoning
+ - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/models"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/models",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ "public_only": public_only,
+ "usecases": usecases,
+ },
+ model_list_params.ModelListParams,
+ ),
+ ),
+ cast_to=ModelListResponse,
+ )
+
+
+class AsyncModelsResource(AsyncAPIResource):
+ @cached_property
+ def providers(self) -> AsyncProvidersResource:
+ return AsyncProvidersResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncModelsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncModelsResourceWithStreamingResponse(self)
+
+ async def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ public_only: bool | NotGiven = NOT_GIVEN,
+ usecases: List[
+ Literal[
+ "MODEL_USECASE_UNKNOWN",
+ "MODEL_USECASE_AGENT",
+ "MODEL_USECASE_FINETUNED",
+ "MODEL_USECASE_KNOWLEDGEBASE",
+ "MODEL_USECASE_GUARDRAIL",
+ "MODEL_USECASE_REASONING",
+ "MODEL_USECASE_SERVERLESS",
+ ]
+ ]
+ | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ModelListResponse:
+ """
+ To list all models, send a GET request to `/v2/gen-ai/models`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ public_only: Only include models that are publicly available.
+
+ usecases: Include only models defined for the listed usecases.
+
+ - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+          - MODEL_USECASE_AGENT: The model may be used in an agent
+          - MODEL_USECASE_FINETUNED: The model may be used for fine tuning
+          - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+            (embedding models)
+          - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+ - MODEL_USECASE_REASONING: The model usecase for reasoning
+ - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/models"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/models",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ "public_only": public_only,
+ "usecases": usecases,
+ },
+ model_list_params.ModelListParams,
+ ),
+ ),
+ cast_to=ModelListResponse,
+ )
+
+
+class ModelsResourceWithRawResponse:
+ def __init__(self, models: ModelsResource) -> None:
+ self._models = models
+
+ self.list = to_raw_response_wrapper(
+ models.list,
+ )
+
+ @cached_property
+ def providers(self) -> ProvidersResourceWithRawResponse:
+ return ProvidersResourceWithRawResponse(self._models.providers)
+
+
+class AsyncModelsResourceWithRawResponse:
+ def __init__(self, models: AsyncModelsResource) -> None:
+ self._models = models
+
+ self.list = async_to_raw_response_wrapper(
+ models.list,
+ )
+
+ @cached_property
+ def providers(self) -> AsyncProvidersResourceWithRawResponse:
+ return AsyncProvidersResourceWithRawResponse(self._models.providers)
+
+
+class ModelsResourceWithStreamingResponse:
+ def __init__(self, models: ModelsResource) -> None:
+ self._models = models
+
+ self.list = to_streamed_response_wrapper(
+ models.list,
+ )
+
+ @cached_property
+ def providers(self) -> ProvidersResourceWithStreamingResponse:
+ return ProvidersResourceWithStreamingResponse(self._models.providers)
+
+
+class AsyncModelsResourceWithStreamingResponse:
+ def __init__(self, models: AsyncModelsResource) -> None:
+ self._models = models
+
+ self.list = async_to_streamed_response_wrapper(
+ models.list,
+ )
+
+ @cached_property
+ def providers(self) -> AsyncProvidersResourceWithStreamingResponse:
+ return AsyncProvidersResourceWithStreamingResponse(self._models.providers)
diff --git a/src/do_gradientai/resources/models/providers/__init__.py b/src/do_gradientai/resources/models/providers/__init__.py
new file mode 100644
index 00000000..1731e057
--- /dev/null
+++ b/src/do_gradientai/resources/models/providers/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+from .anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+from .providers import (
+ ProvidersResource,
+ AsyncProvidersResource,
+ ProvidersResourceWithRawResponse,
+ AsyncProvidersResourceWithRawResponse,
+ ProvidersResourceWithStreamingResponse,
+ AsyncProvidersResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "AnthropicResource",
+ "AsyncAnthropicResource",
+ "AnthropicResourceWithRawResponse",
+ "AsyncAnthropicResourceWithRawResponse",
+ "AnthropicResourceWithStreamingResponse",
+ "AsyncAnthropicResourceWithStreamingResponse",
+ "OpenAIResource",
+ "AsyncOpenAIResource",
+ "OpenAIResourceWithRawResponse",
+ "AsyncOpenAIResourceWithRawResponse",
+ "OpenAIResourceWithStreamingResponse",
+ "AsyncOpenAIResourceWithStreamingResponse",
+ "ProvidersResource",
+ "AsyncProvidersResource",
+ "ProvidersResourceWithRawResponse",
+ "AsyncProvidersResourceWithRawResponse",
+ "ProvidersResourceWithStreamingResponse",
+ "AsyncProvidersResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/models/providers/anthropic.py b/src/do_gradientai/resources/models/providers/anthropic.py
new file mode 100644
index 00000000..e570be51
--- /dev/null
+++ b/src/do_gradientai/resources/models/providers/anthropic.py
@@ -0,0 +1,711 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.models.providers import (
+ anthropic_list_params,
+ anthropic_create_params,
+ anthropic_update_params,
+ anthropic_list_agents_params,
+)
+from ....types.models.providers.anthropic_list_response import AnthropicListResponse
+from ....types.models.providers.anthropic_create_response import AnthropicCreateResponse
+from ....types.models.providers.anthropic_delete_response import AnthropicDeleteResponse
+from ....types.models.providers.anthropic_update_response import AnthropicUpdateResponse
+from ....types.models.providers.anthropic_retrieve_response import AnthropicRetrieveResponse
+from ....types.models.providers.anthropic_list_agents_response import AnthropicListAgentsResponse
+
+__all__ = ["AnthropicResource", "AsyncAnthropicResource"]
+
+
+class AnthropicResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AnthropicResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AnthropicResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AnthropicResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicCreateResponse:
+ """
+ To create an Anthropic API key, send a POST request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ api_key: Anthropic API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ anthropic_create_params.AnthropicCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicRetrieveResponse:
+ """
+ To retrieve details of an Anthropic API key, send a GET request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ body_api_key_uuid: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicUpdateResponse:
+ """
+ To update an Anthropic API key, send a PUT request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: Anthropic API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ anthropic_update_params.AnthropicUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicListResponse:
+ """
+ To list all Anthropic API keys, send a GET request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ anthropic_list_params.AnthropicListParams,
+ ),
+ ),
+ cast_to=AnthropicListResponse,
+ )
+
+ def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicDeleteResponse:
+ """
+ To delete an Anthropic API key, send a DELETE request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicDeleteResponse,
+ )
+
+ def list_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicListAgentsResponse:
+ """
+ List Agents by Anthropic Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ anthropic_list_agents_params.AnthropicListAgentsParams,
+ ),
+ ),
+ cast_to=AnthropicListAgentsResponse,
+ )
+
+
+class AsyncAnthropicResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAnthropicResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncAnthropicResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicCreateResponse:
+ """
+ To create an Anthropic API key, send a POST request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ api_key: Anthropic API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ anthropic_create_params.AnthropicCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicRetrieveResponse:
+ """
+ To retrieve details of an Anthropic API key, send a GET request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ body_api_key_uuid: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicUpdateResponse:
+ """
+ To update an Anthropic API key, send a PUT request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: Anthropic API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ anthropic_update_params.AnthropicUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicListResponse:
+ """
+ To list all Anthropic API keys, send a GET request to
+ `/v2/gen-ai/anthropic/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/anthropic/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ anthropic_list_params.AnthropicListParams,
+ ),
+ ),
+ cast_to=AnthropicListResponse,
+ )
+
+ async def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicDeleteResponse:
+ """
+ To delete an Anthropic API key, send a DELETE request to
+ `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnthropicDeleteResponse,
+ )
+
+ async def list_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnthropicListAgentsResponse:
+ """
+ List Agents by Anthropic Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ anthropic_list_agents_params.AnthropicListAgentsParams,
+ ),
+ ),
+ cast_to=AnthropicListAgentsResponse,
+ )
+
+
+class AnthropicResourceWithRawResponse:
+ def __init__(self, anthropic: AnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ self.create = to_raw_response_wrapper(
+ anthropic.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ anthropic.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ anthropic.update,
+ )
+ self.list = to_raw_response_wrapper(
+ anthropic.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ anthropic.delete,
+ )
+ self.list_agents = to_raw_response_wrapper(
+ anthropic.list_agents,
+ )
+
+
+class AsyncAnthropicResourceWithRawResponse:
+ def __init__(self, anthropic: AsyncAnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ self.create = async_to_raw_response_wrapper(
+ anthropic.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ anthropic.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ anthropic.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ anthropic.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ anthropic.delete,
+ )
+ self.list_agents = async_to_raw_response_wrapper(
+ anthropic.list_agents,
+ )
+
+
+class AnthropicResourceWithStreamingResponse:
+ def __init__(self, anthropic: AnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ self.create = to_streamed_response_wrapper(
+ anthropic.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ anthropic.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ anthropic.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ anthropic.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ anthropic.delete,
+ )
+ self.list_agents = to_streamed_response_wrapper(
+ anthropic.list_agents,
+ )
+
+
+class AsyncAnthropicResourceWithStreamingResponse:
+ def __init__(self, anthropic: AsyncAnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ self.create = async_to_streamed_response_wrapper(
+ anthropic.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ anthropic.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ anthropic.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ anthropic.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ anthropic.delete,
+ )
+ self.list_agents = async_to_streamed_response_wrapper(
+ anthropic.list_agents,
+ )
diff --git a/src/do_gradientai/resources/models/providers/openai.py b/src/do_gradientai/resources/models/providers/openai.py
new file mode 100644
index 00000000..ccd594b8
--- /dev/null
+++ b/src/do_gradientai/resources/models/providers/openai.py
@@ -0,0 +1,707 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.models.providers import (
+ openai_list_params,
+ openai_create_params,
+ openai_update_params,
+ openai_retrieve_agents_params,
+)
+from ....types.models.providers.openai_list_response import OpenAIListResponse
+from ....types.models.providers.openai_create_response import OpenAICreateResponse
+from ....types.models.providers.openai_delete_response import OpenAIDeleteResponse
+from ....types.models.providers.openai_update_response import OpenAIUpdateResponse
+from ....types.models.providers.openai_retrieve_response import OpenAIRetrieveResponse
+from ....types.models.providers.openai_retrieve_agents_response import OpenAIRetrieveAgentsResponse
+
+__all__ = ["OpenAIResource", "AsyncOpenAIResource"]
+
+
+class OpenAIResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> OpenAIResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return OpenAIResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return OpenAIResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAICreateResponse:
+ """
+ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ api_key: OpenAI API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ openai_create_params.OpenAICreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAICreateResponse,
+ )
+
+ def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAIRetrieveResponse:
+ """
+ To retrieve details of an OpenAI API key, send a GET request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ body_api_key_uuid: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAIUpdateResponse:
+ """
+ To update an OpenAI API key, send a PUT request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: OpenAI API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return self._put(
+ f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
+ body=maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ openai_update_params.OpenAIUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAIListResponse:
+ """
+ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ openai_list_params.OpenAIListParams,
+ ),
+ ),
+ cast_to=OpenAIListResponse,
+ )
+
+ def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAIDeleteResponse:
+ """
+ To delete an OpenAI API key, send a DELETE request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIDeleteResponse,
+ )
+
+ def retrieve_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAIRetrieveAgentsResponse:
+ """
+ List Agents by OpenAI Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/openai/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ openai_retrieve_agents_params.OpenAIRetrieveAgentsParams,
+ ),
+ ),
+ cast_to=OpenAIRetrieveAgentsResponse,
+ )
+
+
+class AsyncOpenAIResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncOpenAIResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncOpenAIResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAICreateResponse:
+ """
+ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ api_key: OpenAI API key
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ openai_create_params.OpenAICreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAICreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAIRetrieveResponse:
+ """
+ To retrieve details of an OpenAI API key, send a GET request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ body_api_key_uuid: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAIUpdateResponse:
+ """
+ To update an OpenAI API key, send a PUT request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ api_key: OpenAI API key
+
+ body_api_key_uuid: API key ID
+
+ name: Name of the key
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ openai_update_params.OpenAIUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAIListResponse:
+ """
+ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ openai_list_params.OpenAIListParams,
+ ),
+ ),
+ cast_to=OpenAIListResponse,
+ )
+
+ async def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAIDeleteResponse:
+ """
+ To delete an OpenAI API key, send a DELETE request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=OpenAIDeleteResponse,
+ )
+
+ async def retrieve_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> OpenAIRetrieveAgentsResponse:
+ """
+ List Agents by OpenAI Key.
+
+ Args:
+ page: Page number.
+
+ per_page: Items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/openai/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ openai_retrieve_agents_params.OpenAIRetrieveAgentsParams,
+ ),
+ ),
+ cast_to=OpenAIRetrieveAgentsResponse,
+ )
+
+
+class OpenAIResourceWithRawResponse:
+ def __init__(self, openai: OpenAIResource) -> None:
+ self._openai = openai
+
+ self.create = to_raw_response_wrapper(
+ openai.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ openai.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ openai.update,
+ )
+ self.list = to_raw_response_wrapper(
+ openai.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ openai.delete,
+ )
+ self.retrieve_agents = to_raw_response_wrapper(
+ openai.retrieve_agents,
+ )
+
+
+class AsyncOpenAIResourceWithRawResponse:
+ def __init__(self, openai: AsyncOpenAIResource) -> None:
+ self._openai = openai
+
+ self.create = async_to_raw_response_wrapper(
+ openai.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ openai.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ openai.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ openai.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ openai.delete,
+ )
+ self.retrieve_agents = async_to_raw_response_wrapper(
+ openai.retrieve_agents,
+ )
+
+
+class OpenAIResourceWithStreamingResponse:
+ def __init__(self, openai: OpenAIResource) -> None:
+ self._openai = openai
+
+ self.create = to_streamed_response_wrapper(
+ openai.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ openai.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ openai.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ openai.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ openai.delete,
+ )
+ self.retrieve_agents = to_streamed_response_wrapper(
+ openai.retrieve_agents,
+ )
+
+
+class AsyncOpenAIResourceWithStreamingResponse:
+ def __init__(self, openai: AsyncOpenAIResource) -> None:
+ self._openai = openai
+
+ self.create = async_to_streamed_response_wrapper(
+ openai.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ openai.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ openai.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ openai.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ openai.delete,
+ )
+ self.retrieve_agents = async_to_streamed_response_wrapper(
+ openai.retrieve_agents,
+ )
diff --git a/src/do_gradientai/resources/models/providers/providers.py b/src/do_gradientai/resources/models/providers/providers.py
new file mode 100644
index 00000000..3e3f4dde
--- /dev/null
+++ b/src/do_gradientai/resources/models/providers/providers.py
@@ -0,0 +1,134 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+from .anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["ProvidersResource", "AsyncProvidersResource"]
+
+
+class ProvidersResource(SyncAPIResource):
+ @cached_property
+ def anthropic(self) -> AnthropicResource:
+ return AnthropicResource(self._client)
+
+ @cached_property
+ def openai(self) -> OpenAIResource:
+ return OpenAIResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ProvidersResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return ProvidersResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return ProvidersResourceWithStreamingResponse(self)
+
+
+class AsyncProvidersResource(AsyncAPIResource):
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResource:
+ return AsyncAnthropicResource(self._client)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResource:
+ return AsyncOpenAIResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncProvidersResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncProvidersResourceWithStreamingResponse(self)
+
+
+class ProvidersResourceWithRawResponse:
+ def __init__(self, providers: ProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AnthropicResourceWithRawResponse:
+ return AnthropicResourceWithRawResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> OpenAIResourceWithRawResponse:
+ return OpenAIResourceWithRawResponse(self._providers.openai)
+
+
+class AsyncProvidersResourceWithRawResponse:
+ def __init__(self, providers: AsyncProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResourceWithRawResponse:
+ return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResourceWithRawResponse:
+ return AsyncOpenAIResourceWithRawResponse(self._providers.openai)
+
+
+class ProvidersResourceWithStreamingResponse:
+ def __init__(self, providers: ProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AnthropicResourceWithStreamingResponse:
+ return AnthropicResourceWithStreamingResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> OpenAIResourceWithStreamingResponse:
+ return OpenAIResourceWithStreamingResponse(self._providers.openai)
+
+
+class AsyncProvidersResourceWithStreamingResponse:
+ def __init__(self, providers: AsyncProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse:
+ return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResourceWithStreamingResponse:
+ return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai)
diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
index a7ed7329..0f5da788 100644
--- a/src/do_gradientai/types/__init__.py
+++ b/src/do_gradientai/types/__init__.py
@@ -4,6 +4,7 @@
from . import (
agents,
+ models,
api_agent,
api_workspace,
agent_create_response,
@@ -45,14 +46,19 @@
ChatCompletionTokenLogprob as ChatCompletionTokenLogprob,
)
from .api_agent import APIAgent as APIAgent
+from .api_model import APIModel as APIModel
+from .api_agreement import APIAgreement as APIAgreement
from .api_workspace import APIWorkspace as APIWorkspace
from .api_agent_model import APIAgentModel as APIAgentModel
from .agent_list_params import AgentListParams as AgentListParams
+from .api_model_version import APIModelVersion as APIModelVersion
+from .model_list_params import ModelListParams as ModelListParams
from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase
from .region_list_params import RegionListParams as RegionListParams
from .agent_create_params import AgentCreateParams as AgentCreateParams
from .agent_list_response import AgentListResponse as AgentListResponse
from .agent_update_params import AgentUpdateParams as AgentUpdateParams
+from .model_list_response import ModelListResponse as ModelListResponse
from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod
from .region_list_response import RegionListResponse as RegionListResponse
from .agent_create_response import AgentCreateResponse as AgentCreateResponse
@@ -193,6 +199,10 @@
agents.route_view_response.RouteViewResponse.model_rebuild(
_parent_namespace_depth=0
)
+ models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.model_rebuild(_parent_namespace_depth=0)
+ models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.model_rebuild(
+ _parent_namespace_depth=0
+ )
else:
api_agent.APIAgent.update_forward_refs() # type: ignore
api_workspace.APIWorkspace.update_forward_refs() # type: ignore
@@ -215,3 +225,5 @@
agents.api_link_knowledge_base_output.APILinkKnowledgeBaseOutput.update_forward_refs() # type: ignore
agents.knowledge_base_detach_response.KnowledgeBaseDetachResponse.update_forward_refs() # type: ignore
agents.route_view_response.RouteViewResponse.update_forward_refs() # type: ignore
+ models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.update_forward_refs() # type: ignore
+ models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.update_forward_refs() # type: ignore
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py
index 160e0aa3..2fc17524 100644
--- a/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py
+++ b/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py
@@ -1,66 +1,13 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
-from datetime import datetime
from ...._models import BaseModel
+from ...api_model import APIModel
from ...shared.api_meta import APIMeta
from ...shared.api_links import APILinks
-__all__ = ["ModelListResponse", "Model", "ModelAgreement", "ModelVersion"]
-
-
-class ModelAgreement(BaseModel):
- description: Optional[str] = None
-
- name: Optional[str] = None
-
- url: Optional[str] = None
-
- uuid: Optional[str] = None
-
-
-class ModelVersion(BaseModel):
- major: Optional[int] = None
- """Major version number"""
-
- minor: Optional[int] = None
- """Minor version number"""
-
- patch: Optional[int] = None
- """Patch version number"""
-
-
-class Model(BaseModel):
- agreement: Optional[ModelAgreement] = None
- """Agreement Description"""
-
- created_at: Optional[datetime] = None
- """Creation date / time"""
-
- is_foundational: Optional[bool] = None
- """True if it is a foundational model provided by do"""
-
- name: Optional[str] = None
- """Name of the model"""
-
- parent_uuid: Optional[str] = None
- """Unique id of the model, this model is based on"""
-
- updated_at: Optional[datetime] = None
- """Last modified"""
-
- upload_complete: Optional[bool] = None
- """Model has been fully uploaded"""
-
- url: Optional[str] = None
- """Download url"""
-
- uuid: Optional[str] = None
- """Unique id"""
-
- version: Optional[ModelVersion] = None
- """Version Information about a Model"""
+__all__ = ["ModelListResponse"]
class ModelListResponse(BaseModel):
@@ -70,5 +17,5 @@ class ModelListResponse(BaseModel):
meta: Optional[APIMeta] = None
"""Meta information about the data set"""
- models: Optional[List[Model]] = None
+ models: Optional[List[APIModel]] = None
"""The models"""
diff --git a/src/do_gradientai/types/api_agent_model.py b/src/do_gradientai/types/api_agent_model.py
index 588f1cd0..f111bfb7 100644
--- a/src/do_gradientai/types/api_agent_model.py
+++ b/src/do_gradientai/types/api_agent_model.py
@@ -5,33 +5,14 @@
from typing_extensions import Literal
from .._models import BaseModel
+from .api_agreement import APIAgreement
+from .api_model_version import APIModelVersion
-__all__ = ["APIAgentModel", "Agreement", "Version"]
-
-
-class Agreement(BaseModel):
- description: Optional[str] = None
-
- name: Optional[str] = None
-
- url: Optional[str] = None
-
- uuid: Optional[str] = None
-
-
-class Version(BaseModel):
- major: Optional[int] = None
- """Major version number"""
-
- minor: Optional[int] = None
- """Minor version number"""
-
- patch: Optional[int] = None
- """Patch version number"""
+__all__ = ["APIAgentModel"]
class APIAgentModel(BaseModel):
- agreement: Optional[Agreement] = None
+ agreement: Optional[APIAgreement] = None
"""Agreement Description"""
created_at: Optional[datetime] = None
@@ -86,5 +67,5 @@ class APIAgentModel(BaseModel):
uuid: Optional[str] = None
"""Unique id"""
- version: Optional[Version] = None
+ version: Optional[APIModelVersion] = None
"""Version Information about a Model"""
diff --git a/src/do_gradientai/types/api_agreement.py b/src/do_gradientai/types/api_agreement.py
new file mode 100644
index 00000000..c4359f1f
--- /dev/null
+++ b/src/do_gradientai/types/api_agreement.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["APIAgreement"]
+
+
+class APIAgreement(BaseModel):
+ description: Optional[str] = None
+
+ name: Optional[str] = None
+
+ url: Optional[str] = None
+
+ uuid: Optional[str] = None
diff --git a/src/do_gradientai/types/api_model.py b/src/do_gradientai/types/api_model.py
new file mode 100644
index 00000000..7c530ee2
--- /dev/null
+++ b/src/do_gradientai/types/api_model.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+
+from .._models import BaseModel
+from .api_agreement import APIAgreement
+from .api_model_version import APIModelVersion
+
+__all__ = ["APIModel"]
+
+
+class APIModel(BaseModel):
+ agreement: Optional[APIAgreement] = None
+ """Agreement Description"""
+
+ created_at: Optional[datetime] = None
+ """Creation date / time"""
+
+ is_foundational: Optional[bool] = None
+ """True if it is a foundational model provided by do"""
+
+ name: Optional[str] = None
+ """Name of the model"""
+
+ parent_uuid: Optional[str] = None
+ """Unique id of the model, this model is based on"""
+
+ updated_at: Optional[datetime] = None
+ """Last modified"""
+
+ upload_complete: Optional[bool] = None
+ """Model has been fully uploaded"""
+
+ url: Optional[str] = None
+ """Download url"""
+
+ uuid: Optional[str] = None
+ """Unique id"""
+
+ version: Optional[APIModelVersion] = None
+ """Version Information about a Model"""
diff --git a/src/do_gradientai/types/api_model_version.py b/src/do_gradientai/types/api_model_version.py
new file mode 100644
index 00000000..f19a78c6
--- /dev/null
+++ b/src/do_gradientai/types/api_model_version.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["APIModelVersion"]
+
+
+class APIModelVersion(BaseModel):
+ major: Optional[int] = None
+ """Major version number"""
+
+ minor: Optional[int] = None
+ """Minor version number"""
+
+ patch: Optional[int] = None
+ """Patch version number"""
diff --git a/src/do_gradientai/types/model_list_params.py b/src/do_gradientai/types/model_list_params.py
new file mode 100644
index 00000000..a2fa066a
--- /dev/null
+++ b/src/do_gradientai/types/model_list_params.py
@@ -0,0 +1,42 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ModelListParams"]
+
+
+class ModelListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
+
+ public_only: bool
+ """Only include models that are publicly available."""
+
+ usecases: List[
+ Literal[
+ "MODEL_USECASE_UNKNOWN",
+ "MODEL_USECASE_AGENT",
+ "MODEL_USECASE_FINETUNED",
+ "MODEL_USECASE_KNOWLEDGEBASE",
+ "MODEL_USECASE_GUARDRAIL",
+ "MODEL_USECASE_REASONING",
+ "MODEL_USECASE_SERVERLESS",
+ ]
+ ]
+ """Include only models defined for the listed usecases.
+
+    - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+    - MODEL_USECASE_AGENT: The model may be used in an agent
+    - MODEL_USECASE_FINETUNED: The model may be used for fine tuning
+    - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+      (embedding models)
+    - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+    - MODEL_USECASE_REASONING: The model usecase for reasoning
+    - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
+ """
diff --git a/src/do_gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py
new file mode 100644
index 00000000..12d95437
--- /dev/null
+++ b/src/do_gradientai/types/model_list_response.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .api_model import APIModel
+from .shared.api_meta import APIMeta
+from .shared.api_links import APILinks
+
+__all__ = ["ModelListResponse"]
+
+
+class ModelListResponse(BaseModel):
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
+
+ models: Optional[List[APIModel]] = None
+ """The models"""
diff --git a/src/do_gradientai/types/models/__init__.py b/src/do_gradientai/types/models/__init__.py
new file mode 100644
index 00000000..f8ee8b14
--- /dev/null
+++ b/src/do_gradientai/types/models/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
diff --git a/src/do_gradientai/types/models/providers/__init__.py b/src/do_gradientai/types/models/providers/__init__.py
new file mode 100644
index 00000000..74366e70
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/__init__.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .openai_list_params import OpenAIListParams as OpenAIListParams
+from .openai_create_params import OpenAICreateParams as OpenAICreateParams
+from .openai_list_response import OpenAIListResponse as OpenAIListResponse
+from .openai_update_params import OpenAIUpdateParams as OpenAIUpdateParams
+from .anthropic_list_params import AnthropicListParams as AnthropicListParams
+from .openai_create_response import OpenAICreateResponse as OpenAICreateResponse
+from .openai_delete_response import OpenAIDeleteResponse as OpenAIDeleteResponse
+from .openai_update_response import OpenAIUpdateResponse as OpenAIUpdateResponse
+from .anthropic_create_params import AnthropicCreateParams as AnthropicCreateParams
+from .anthropic_list_response import AnthropicListResponse as AnthropicListResponse
+from .anthropic_update_params import AnthropicUpdateParams as AnthropicUpdateParams
+from .openai_retrieve_response import OpenAIRetrieveResponse as OpenAIRetrieveResponse
+from .anthropic_create_response import AnthropicCreateResponse as AnthropicCreateResponse
+from .anthropic_delete_response import AnthropicDeleteResponse as AnthropicDeleteResponse
+from .anthropic_update_response import AnthropicUpdateResponse as AnthropicUpdateResponse
+from .anthropic_retrieve_response import AnthropicRetrieveResponse as AnthropicRetrieveResponse
+from .anthropic_list_agents_params import AnthropicListAgentsParams as AnthropicListAgentsParams
+from .openai_retrieve_agents_params import OpenAIRetrieveAgentsParams as OpenAIRetrieveAgentsParams
+from .anthropic_list_agents_response import AnthropicListAgentsResponse as AnthropicListAgentsResponse
+from .openai_retrieve_agents_response import OpenAIRetrieveAgentsResponse as OpenAIRetrieveAgentsResponse
diff --git a/src/do_gradientai/types/models/providers/anthropic_create_params.py b/src/do_gradientai/types/models/providers/anthropic_create_params.py
new file mode 100644
index 00000000..c9fd6e85
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/anthropic_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AnthropicCreateParams"]
+
+
+class AnthropicCreateParams(TypedDict, total=False):
+ api_key: str
+ """Anthropic API key"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_create_response.py b/src/do_gradientai/types/models/providers/anthropic_create_response.py
new file mode 100644
index 00000000..0fbe50bc
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/anthropic_create_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["AnthropicCreateResponse"]
+
+
+class AnthropicCreateResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_delete_response.py b/src/do_gradientai/types/models/providers/anthropic_delete_response.py
new file mode 100644
index 00000000..b4fdd978
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/anthropic_delete_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["AnthropicDeleteResponse"]
+
+
+class AnthropicDeleteResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py b/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py
new file mode 100644
index 00000000..b3308b69
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AnthropicListAgentsParams"]
+
+
+class AnthropicListAgentsParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py b/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py
new file mode 100644
index 00000000..a1525275
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
+
+__all__ = ["AnthropicListAgentsResponse"]
+
+
+class AnthropicListAgentsResponse(BaseModel):
+ agents: Optional[List["APIAgent"]] = None
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
+
+
+from ...api_agent import APIAgent
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_params.py b/src/do_gradientai/types/models/providers/anthropic_list_params.py
new file mode 100644
index 00000000..ae1cca58
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/anthropic_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["AnthropicListParams"]
+
+
+class AnthropicListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/do_gradientai/types/models/providers/anthropic_list_response.py b/src/do_gradientai/types/models/providers/anthropic_list_response.py
new file mode 100644
index 00000000..24d6547a
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/anthropic_list_response.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["AnthropicListResponse"]
+
+
+class AnthropicListResponse(BaseModel):
+ api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None
+ """Api key infos"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py b/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py
new file mode 100644
index 00000000..61324b7d
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["AnthropicRetrieveResponse"]
+
+
+class AnthropicRetrieveResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_update_params.py b/src/do_gradientai/types/models/providers/anthropic_update_params.py
new file mode 100644
index 00000000..865dc29c
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/anthropic_update_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["AnthropicUpdateParams"]
+
+
+class AnthropicUpdateParams(TypedDict, total=False):
+ api_key: str
+ """Anthropic API key"""
+
+ body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+ """API key ID"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/do_gradientai/types/models/providers/anthropic_update_response.py b/src/do_gradientai/types/models/providers/anthropic_update_response.py
new file mode 100644
index 00000000..3a6daaea
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/anthropic_update_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["AnthropicUpdateResponse"]
+
+
+class AnthropicUpdateResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
+ """Anthropic API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/openai_create_params.py b/src/do_gradientai/types/models/providers/openai_create_params.py
new file mode 100644
index 00000000..8ed7f571
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/openai_create_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["OpenAICreateParams"]
+
+
+class OpenAICreateParams(TypedDict, total=False):
+ api_key: str
+ """OpenAI API key"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/do_gradientai/types/models/providers/openai_create_response.py b/src/do_gradientai/types/models/providers/openai_create_response.py
new file mode 100644
index 00000000..b2e94766
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/openai_create_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["OpenAICreateResponse"]
+
+
+class OpenAICreateResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/openai_delete_response.py b/src/do_gradientai/types/models/providers/openai_delete_response.py
new file mode 100644
index 00000000..e59c89fe
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/openai_delete_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["OpenAIDeleteResponse"]
+
+
+class OpenAIDeleteResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/openai_list_params.py b/src/do_gradientai/types/models/providers/openai_list_params.py
new file mode 100644
index 00000000..5677eeaf
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/openai_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["OpenAIListParams"]
+
+
+class OpenAIListParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/do_gradientai/types/models/providers/openai_list_response.py b/src/do_gradientai/types/models/providers/openai_list_response.py
new file mode 100644
index 00000000..698cd11e
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/openai_list_response.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["OpenAIListResponse"]
+
+
+class OpenAIListResponse(BaseModel):
+ api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None
+ """Api key infos"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py
new file mode 100644
index 00000000..2db6d7a1
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["OpenAIRetrieveAgentsParams"]
+
+
+class OpenAIRetrieveAgentsParams(TypedDict, total=False):
+ page: int
+ """Page number."""
+
+ per_page: int
+ """Items per page."""
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py
new file mode 100644
index 00000000..717a56cd
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
+
+__all__ = ["OpenAIRetrieveAgentsResponse"]
+
+
+class OpenAIRetrieveAgentsResponse(BaseModel):
+ agents: Optional[List["APIAgent"]] = None
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
+
+
+from ...api_agent import APIAgent
diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_response.py b/src/do_gradientai/types/models/providers/openai_retrieve_response.py
new file mode 100644
index 00000000..0f382073
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/openai_retrieve_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["OpenAIRetrieveResponse"]
+
+
+class OpenAIRetrieveResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/src/do_gradientai/types/models/providers/openai_update_params.py b/src/do_gradientai/types/models/providers/openai_update_params.py
new file mode 100644
index 00000000..9b99495e
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/openai_update_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["OpenAIUpdateParams"]
+
+
+class OpenAIUpdateParams(TypedDict, total=False):
+ api_key: str
+ """OpenAI API key"""
+
+ body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+ """API key ID"""
+
+ name: str
+ """Name of the key"""
diff --git a/src/do_gradientai/types/models/providers/openai_update_response.py b/src/do_gradientai/types/models/providers/openai_update_response.py
new file mode 100644
index 00000000..ec7a1c94
--- /dev/null
+++ b/src/do_gradientai/types/models/providers/openai_update_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["OpenAIUpdateResponse"]
+
+
+class OpenAIUpdateResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
+ """OpenAI API Key Info"""
diff --git a/tests/api_resources/models/__init__.py b/tests/api_resources/models/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/models/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/models/providers/__init__.py b/tests/api_resources/models/providers/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/models/providers/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py
new file mode 100644
index 00000000..6b3d99a3
--- /dev/null
+++ b/tests/api_resources/models/providers/test_anthropic.py
@@ -0,0 +1,557 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.models.providers import (
+ AnthropicListResponse,
+ AnthropicCreateResponse,
+ AnthropicDeleteResponse,
+ AnthropicUpdateResponse,
+ AnthropicRetrieveResponse,
+ AnthropicListAgentsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAnthropic:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create(self, client: GradientAI) -> None:
+ anthropic = client.models.providers.anthropic.create()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create_with_all_params(self, client: GradientAI) -> None:
+ anthropic = client.models.providers.anthropic.create(
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_create(self, client: GradientAI) -> None:
+ response = client.models.providers.anthropic.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_create(self, client: GradientAI) -> None:
+ with client.models.providers.anthropic.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve(self, client: GradientAI) -> None:
+ anthropic = client.models.providers.anthropic.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve(self, client: GradientAI) -> None:
+ response = client.models.providers.anthropic.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+ with client.models.providers.anthropic.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.models.providers.anthropic.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update(self, client: GradientAI) -> None:
+ anthropic = client.models.providers.anthropic.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update_with_all_params(self, client: GradientAI) -> None:
+ anthropic = client.models.providers.anthropic.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_update(self, client: GradientAI) -> None:
+ response = client.models.providers.anthropic.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_update(self, client: GradientAI) -> None:
+ with client.models.providers.anthropic.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_update(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.models.providers.anthropic.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ anthropic = client.models.providers.anthropic.list()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_with_all_params(self, client: GradientAI) -> None:
+ anthropic = client.models.providers.anthropic.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list(self, client: GradientAI) -> None:
+ response = client.models.providers.anthropic.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list(self, client: GradientAI) -> None:
+ with client.models.providers.anthropic.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_delete(self, client: GradientAI) -> None:
+ anthropic = client.models.providers.anthropic.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_delete(self, client: GradientAI) -> None:
+ response = client.models.providers.anthropic.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_delete(self, client: GradientAI) -> None:
+ with client.models.providers.anthropic.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_delete(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.models.providers.anthropic.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_agents(self, client: GradientAI) -> None:
+ anthropic = client.models.providers.anthropic.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None:
+ anthropic = client.models.providers.anthropic.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list_agents(self, client: GradientAI) -> None:
+ response = client.models.providers.anthropic.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = response.parse()
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list_agents(self, client: GradientAI) -> None:
+ with client.models.providers.anthropic.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = response.parse()
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_list_agents(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.models.providers.anthropic.with_raw_response.list_agents(
+ uuid="",
+ )
+
+
+class TestAsyncAnthropic:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+ anthropic = await async_client.models.providers.anthropic.create()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ anthropic = await async_client.models.providers.anthropic.create(
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+ anthropic = await async_client.models.providers.anthropic.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicRetrieveResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.models.providers.anthropic.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+ anthropic = await async_client.models.providers.anthropic.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ anthropic = await async_client.models.providers.anthropic.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.models.providers.anthropic.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ anthropic = await async_client.models.providers.anthropic.list()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ anthropic = await async_client.models.providers.anthropic.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicListResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+ anthropic = await async_client.models.providers.anthropic.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicDeleteResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.models.providers.anthropic.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None:
+ anthropic = await async_client.models.providers.anthropic.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ anthropic = await async_client.models.providers.anthropic.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.anthropic.with_raw_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.anthropic.with_streaming_response.list_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ anthropic = await response.parse()
+ assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.models.providers.anthropic.with_raw_response.list_agents(
+ uuid="",
+ )
diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py
new file mode 100644
index 00000000..bdde97ca
--- /dev/null
+++ b/tests/api_resources/models/providers/test_openai.py
@@ -0,0 +1,557 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.models.providers import (
+ OpenAIListResponse,
+ OpenAICreateResponse,
+ OpenAIDeleteResponse,
+ OpenAIUpdateResponse,
+ OpenAIRetrieveResponse,
+ OpenAIRetrieveAgentsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestOpenAI:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create(self, client: GradientAI) -> None:
+ openai = client.models.providers.openai.create()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create_with_all_params(self, client: GradientAI) -> None:
+ openai = client.models.providers.openai.create(
+ api_key='"sk-proj--123456789098765432123456789"',
+ name='"Production Key"',
+ )
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_create(self, client: GradientAI) -> None:
+ response = client.models.providers.openai.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_create(self, client: GradientAI) -> None:
+ with client.models.providers.openai.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve(self, client: GradientAI) -> None:
+ openai = client.models.providers.openai.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve(self, client: GradientAI) -> None:
+ response = client.models.providers.openai.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+ with client.models.providers.openai.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.models.providers.openai.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update(self, client: GradientAI) -> None:
+ openai = client.models.providers.openai.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update_with_all_params(self, client: GradientAI) -> None:
+ openai = client.models.providers.openai.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_update(self, client: GradientAI) -> None:
+ response = client.models.providers.openai.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_update(self, client: GradientAI) -> None:
+ with client.models.providers.openai.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_update(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.models.providers.openai.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ openai = client.models.providers.openai.list()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_with_all_params(self, client: GradientAI) -> None:
+ openai = client.models.providers.openai.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list(self, client: GradientAI) -> None:
+ response = client.models.providers.openai.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list(self, client: GradientAI) -> None:
+ with client.models.providers.openai.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_delete(self, client: GradientAI) -> None:
+ openai = client.models.providers.openai.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_delete(self, client: GradientAI) -> None:
+ response = client.models.providers.openai.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_delete(self, client: GradientAI) -> None:
+ with client.models.providers.openai.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_delete(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.models.providers.openai.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve_agents(self, client: GradientAI) -> None:
+ openai = client.models.providers.openai.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None:
+ openai = client.models.providers.openai.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve_agents(self, client: GradientAI) -> None:
+ response = client.models.providers.openai.with_raw_response.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = response.parse()
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None:
+ with client.models.providers.openai.with_streaming_response.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = response.parse()
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve_agents(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.models.providers.openai.with_raw_response.retrieve_agents(
+ uuid="",
+ )
+
+
+class TestAsyncOpenAI:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+ openai = await async_client.models.providers.openai.create()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ openai = await async_client.models.providers.openai.create(
+ api_key='"sk-proj--123456789098765432123456789"',
+ name='"Production Key"',
+ )
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAICreateResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+ openai = await async_client.models.providers.openai.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAIRetrieveResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.models.providers.openai.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+ openai = await async_client.models.providers.openai.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ openai = await async_client.models.providers.openai.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ api_key='"sk-ant-12345678901234567890123456789012"',
+ body_api_key_uuid='"12345678-1234-1234-1234-123456789012"',
+ name='"Production Key"',
+ )
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.update(
+ path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAIUpdateResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.models.providers.openai.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ openai = await async_client.models.providers.openai.list()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ openai = await async_client.models.providers.openai.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAIListResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+ openai = await async_client.models.providers.openai.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAIDeleteResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.models.providers.openai.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
+ openai = await async_client.models.providers.openai.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ openai = await async_client.models.providers.openai.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.providers.openai.with_raw_response.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ openai = await response.parse()
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.providers.openai.with_streaming_response.retrieve_agents(
+ uuid='"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ openai = await response.parse()
+ assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.models.providers.openai.with_raw_response.retrieve_agents(
+ uuid="",
+ )
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
new file mode 100644
index 00000000..f7e21015
--- /dev/null
+++ b/tests/api_resources/test_models.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import ModelListResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestModels:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ model = client.models.list()
+ assert_matches_type(ModelListResponse, model, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_with_all_params(self, client: GradientAI) -> None:
+ model = client.models.list(
+ page=0,
+ per_page=0,
+ public_only=True,
+ usecases=["MODEL_USECASE_UNKNOWN"],
+ )
+ assert_matches_type(ModelListResponse, model, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list(self, client: GradientAI) -> None:
+ response = client.models.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ model = response.parse()
+ assert_matches_type(ModelListResponse, model, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list(self, client: GradientAI) -> None:
+ with client.models.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ model = response.parse()
+ assert_matches_type(ModelListResponse, model, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncModels:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ model = await async_client.models.list()
+ assert_matches_type(ModelListResponse, model, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ model = await async_client.models.list(
+ page=0,
+ per_page=0,
+ public_only=True,
+ usecases=["MODEL_USECASE_UNKNOWN"],
+ )
+ assert_matches_type(ModelListResponse, model, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ model = await response.parse()
+ assert_matches_type(ModelListResponse, model, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ model = await response.parse()
+ assert_matches_type(ModelListResponse, model, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
From 2dee5917c6907c1bd6dd22724160632289559131 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 29 Jul 2025 19:20:12 +0000
Subject: [PATCH 06/10] codegen metadata
---
.stats.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.stats.yml b/.stats.yml
index 1a7efa56..9a868058 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 170
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-015417b36365dfcb32166e67379c38de8bf5127c33dff646097a819a7b4dc588.yml
openapi_spec_hash: d7d811c13cc79f15d82fe680cf425859
-config_hash: 9e90c31a8b4d524c6714fa969828ecc8
+config_hash: 3ad1734779befb065101197f2f35568c
From 56231161358e9f53388b0c60c07cb7c42edbed73 Mon Sep 17 00:00:00 2001
From: Ben Batha
Date: Tue, 29 Jul 2025 15:32:13 -0400
Subject: [PATCH 07/10] fix lints
---
src/do_gradientai/_client.py | 121 ++++++++++++++++++++++++-----------
1 file changed, 82 insertions(+), 39 deletions(-)
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index 9bd9d0c7..959e30f3 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -32,20 +32,21 @@
)
if TYPE_CHECKING:
- from .resources import chat, agents, models, regions, databases, inference, gpu_droplets, knowledge_bases
+ from .resources import (
+ chat,
+ agents,
+ models,
+ regions,
+ databases,
+ inference,
+ gpu_droplets,
+ knowledge_bases,
+ )
from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.gpu_droplets import (
GPUDropletsResource,
AsyncGPUDropletsResource,
- sizes,
- images,
- account,
- volumes,
- firewalls,
- snapshots,
- floating_ips,
- load_balancers,
)
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
from .resources.models.models import ModelsResource, AsyncModelsResource
@@ -53,19 +54,15 @@
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
from .resources.gpu_droplets.snapshots import (
SnapshotsResource,
- AsyncSnapshotsResource,
)
from .resources.gpu_droplets.images.images import (
ImagesResource,
- AsyncImagesResource,
)
from .resources.gpu_droplets.account.account import (
AccountResource,
- AsyncAccountResource,
)
from .resources.gpu_droplets.volumes.volumes import (
VolumesResource,
- AsyncVolumesResource,
)
from .resources.knowledge_bases.knowledge_bases import (
KnowledgeBasesResource,
@@ -73,15 +70,12 @@
)
from .resources.gpu_droplets.firewalls.firewalls import (
FirewallsResource,
- AsyncFirewallsResource,
)
from .resources.gpu_droplets.floating_ips.floating_ips import (
FloatingIPsResource,
- AsyncFloatingIPsResource,
)
from .resources.gpu_droplets.load_balancers.load_balancers import (
LoadBalancersResource,
- AsyncLoadBalancersResource,
)
__all__ = [
@@ -310,7 +304,9 @@ def default_headers(self) -> dict[str, str | Omit]:
@override
def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
- if (self.api_key or self.agent_key or self.inference_key) and headers.get("Authorization"):
+ if (self.api_key or self.agent_key or self.inference_key) and headers.get(
+ "Authorization"
+ ):
return
if isinstance(custom_headers.get("Authorization"), Omit):
return
@@ -340,10 +336,14 @@ def copy(
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
if default_headers is not None and set_default_headers is not None:
- raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
+ raise ValueError(
+ "The `default_headers` and `set_default_headers` arguments are mutually exclusive"
+ )
if default_query is not None and set_default_query is not None:
- raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
+ raise ValueError(
+ "The `default_query` and `set_default_query` arguments are mutually exclusive"
+ )
headers = self._custom_headers
if default_headers is not None:
@@ -390,10 +390,14 @@ def _make_status_error(
return _exceptions.BadRequestError(err_msg, response=response, body=body)
if response.status_code == 401:
- return _exceptions.AuthenticationError(err_msg, response=response, body=body)
+ return _exceptions.AuthenticationError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 403:
- return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
+ return _exceptions.PermissionDeniedError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 404:
return _exceptions.NotFoundError(err_msg, response=response, body=body)
@@ -402,13 +406,17 @@ def _make_status_error(
return _exceptions.ConflictError(err_msg, response=response, body=body)
if response.status_code == 422:
- return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
+ return _exceptions.UnprocessableEntityError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 429:
return _exceptions.RateLimitError(err_msg, response=response, body=body)
if response.status_code >= 500:
- return _exceptions.InternalServerError(err_msg, response=response, body=body)
+ return _exceptions.InternalServerError(
+ err_msg, response=response, body=body
+ )
return APIStatusError(err_msg, response=response, body=body)
@@ -578,7 +586,9 @@ def default_headers(self) -> dict[str, str | Omit]:
@override
def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
- if (self.api_key or self.agent_key or self.inference_key) and headers.get("Authorization"):
+ if (self.api_key or self.agent_key or self.inference_key) and headers.get(
+ "Authorization"
+ ):
return
if isinstance(custom_headers.get("Authorization"), Omit):
return
@@ -608,10 +618,14 @@ def copy(
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
if default_headers is not None and set_default_headers is not None:
- raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
+ raise ValueError(
+ "The `default_headers` and `set_default_headers` arguments are mutually exclusive"
+ )
if default_query is not None and set_default_query is not None:
- raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
+ raise ValueError(
+ "The `default_query` and `set_default_query` arguments are mutually exclusive"
+ )
headers = self._custom_headers
if default_headers is not None:
@@ -658,10 +672,14 @@ def _make_status_error(
return _exceptions.BadRequestError(err_msg, response=response, body=body)
if response.status_code == 401:
- return _exceptions.AuthenticationError(err_msg, response=response, body=body)
+ return _exceptions.AuthenticationError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 403:
- return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
+ return _exceptions.PermissionDeniedError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 404:
return _exceptions.NotFoundError(err_msg, response=response, body=body)
@@ -670,13 +688,17 @@ def _make_status_error(
return _exceptions.ConflictError(err_msg, response=response, body=body)
if response.status_code == 422:
- return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
+ return _exceptions.UnprocessableEntityError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 429:
return _exceptions.RateLimitError(err_msg, response=response, body=body)
if response.status_code >= 500:
- return _exceptions.InternalServerError(err_msg, response=response, body=body)
+ return _exceptions.InternalServerError(
+ err_msg, response=response, body=body
+ )
return APIStatusError(err_msg, response=response, body=body)
@@ -734,6 +756,7 @@ def databases(self) -> databases.DatabasesResourceWithRawResponse:
return DatabasesResourceWithRawResponse(self._client.databases)
+
class AsyncGradientAIWithRawResponse:
_client: AsyncGradientAI
@@ -765,8 +788,12 @@ def inference(self) -> inference.AsyncInferenceResourceWithRawResponse:
return AsyncInferenceResourceWithRawResponse(self._client.inference)
@cached_property
- def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse:
- from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse
+ def knowledge_bases(
+ self,
+ ) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse:
+ from .resources.knowledge_bases import (
+ AsyncKnowledgeBasesResourceWithRawResponse,
+ )
return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases)
@@ -788,6 +815,7 @@ def databases(self) -> databases.AsyncDatabasesResourceWithRawResponse:
return AsyncDatabasesResourceWithRawResponse(self._client.databases)
+
class GradientAIWithStreamedResponse:
_client: GradientAI
@@ -819,8 +847,12 @@ def inference(self) -> inference.InferenceResourceWithStreamingResponse:
return InferenceResourceWithStreamingResponse(self._client.inference)
@cached_property
- def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse:
- from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse
+ def knowledge_bases(
+ self,
+ ) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse:
+ from .resources.knowledge_bases import (
+ KnowledgeBasesResourceWithStreamingResponse,
+ )
return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)
@@ -842,6 +874,7 @@ def databases(self) -> databases.DatabasesResourceWithStreamingResponse:
return DatabasesResourceWithStreamingResponse(self._client.databases)
+
class AsyncGradientAIWithStreamedResponse:
_client: AsyncGradientAI
@@ -861,8 +894,12 @@ def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
return AsyncChatResourceWithStreamingResponse(self._client.chat)
@cached_property
- def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithStreamingResponse:
- from .resources.gpu_droplets import AsyncGPUDropletsResourceWithStreamingResponse
+ def gpu_droplets(
+ self,
+ ) -> gpu_droplets.AsyncGPUDropletsResourceWithStreamingResponse:
+ from .resources.gpu_droplets import (
+ AsyncGPUDropletsResourceWithStreamingResponse,
+ )
return AsyncGPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets)
@@ -873,10 +910,16 @@ def inference(self) -> inference.AsyncInferenceResourceWithStreamingResponse:
return AsyncInferenceResourceWithStreamingResponse(self._client.inference)
@cached_property
- def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse:
- from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse
+ def knowledge_bases(
+ self,
+ ) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse:
+ from .resources.knowledge_bases import (
+ AsyncKnowledgeBasesResourceWithStreamingResponse,
+ )
- return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)
+ return AsyncKnowledgeBasesResourceWithStreamingResponse(
+ self._client.knowledge_bases
+ )
@cached_property
def models(self) -> models.AsyncModelsResourceWithStreamingResponse:
From e10f211e8c0be26d0e4e34b5ac0df24d5572fedc Mon Sep 17 00:00:00 2001
From: Ben Batha
Date: Tue, 29 Jul 2025 15:39:00 -0400
Subject: [PATCH 08/10] fix lints
---
src/do_gradientai/_client.py | 3 +++
src/do_gradientai/types/__init__.py | 18 ++++++++++++++++--
.../agents/evaluation_metrics/__init__.py | 6 +++++-
3 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index 959e30f3..691dfc21 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -50,6 +50,9 @@
)
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
from .resources.models.models import ModelsResource, AsyncModelsResource
+ from .resources.gpu_droplets.sizes import (
+ SizesResource,
+ )
from .resources.databases.databases import DatabasesResource, AsyncDatabasesResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
from .resources.gpu_droplets.snapshots import (
diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
index 0f5da788..6d0ffdd6 100644
--- a/src/do_gradientai/types/__init__.py
+++ b/src/do_gradientai/types/__init__.py
@@ -69,7 +69,13 @@
from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse
from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
from .gpu_droplet_list_params import GPUDropletListParams as GPUDropletListParams
-from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
+from .agents.evaluation_metrics import (
+ openai, # type: ignore # noqa: F401
+ anthropic, # type: ignore # noqa: F401
+)
+from .api_deployment_visibility import (
+ APIDeploymentVisibility as APIDeploymentVisibility,
+)
from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams
from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse
from .agent_update_status_params import (
@@ -112,6 +118,9 @@
from .gpu_droplet_list_kernels_params import (
GPUDropletListKernelsParams as GPUDropletListKernelsParams,
)
+from .agents.evaluation_metrics.openai import (
+ key_list_agents_response, # type: ignore # noqa: F401
+)
from .gpu_droplet_delete_by_tag_params import (
GPUDropletDeleteByTagParams as GPUDropletDeleteByTagParams,
)
@@ -127,6 +136,9 @@
from .gpu_droplet_list_snapshots_params import (
GPUDropletListSnapshotsParams as GPUDropletListSnapshotsParams,
)
+from .agents.evaluation_metrics.anthropic import (
+ key_list_agents_response, # type: ignore # noqa: F401
+)
from .gpu_droplet_list_firewalls_response import (
GPUDropletListFirewallsResponse as GPUDropletListFirewallsResponse,
)
@@ -199,7 +211,9 @@
agents.route_view_response.RouteViewResponse.model_rebuild(
_parent_namespace_depth=0
)
- models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.model_rebuild(_parent_namespace_depth=0)
+ models.providers.anthropic_list_agents_response.AnthropicListAgentsResponse.model_rebuild(
+ _parent_namespace_depth=0
+ )
models.providers.openai_retrieve_agents_response.OpenAIRetrieveAgentsResponse.model_rebuild(
_parent_namespace_depth=0
)
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/__init__.py
index 94afaed5..12ca91f3 100644
--- a/src/do_gradientai/types/agents/evaluation_metrics/__init__.py
+++ b/src/do_gradientai/types/agents/evaluation_metrics/__init__.py
@@ -2,7 +2,11 @@
from __future__ import annotations
-from . import workspaces # type: ignore # noqa: F401
+from . import (
+ openai, # type: ignore # noqa: F401
+ anthropic, # type: ignore # noqa: F401
+ workspaces, # type: ignore # noqa: F401
+)
from .model_list_params import ModelListParams as ModelListParams
from .model_list_response import ModelListResponse as ModelListResponse
from .workspace_create_params import WorkspaceCreateParams as WorkspaceCreateParams
From ab28eee48aadede5b5e189550cba352495f00659 Mon Sep 17 00:00:00 2001
From: Ben Batha
Date: Tue, 29 Jul 2025 15:41:54 -0400
Subject: [PATCH 09/10] fix lints
---
src/do_gradientai/types/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
index 6d0ffdd6..9470b525 100644
--- a/src/do_gradientai/types/__init__.py
+++ b/src/do_gradientai/types/__init__.py
@@ -137,7 +137,7 @@
GPUDropletListSnapshotsParams as GPUDropletListSnapshotsParams,
)
from .agents.evaluation_metrics.anthropic import (
- key_list_agents_response, # type: ignore # noqa: F401
+ key_list_response, # type: ignore # noqa: F401
)
from .gpu_droplet_list_firewalls_response import (
GPUDropletListFirewallsResponse as GPUDropletListFirewallsResponse,
From 450c459bd69b46f05f05e3d68ad94600ebb6c842 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 29 Jul 2025 19:45:01 +0000
Subject: [PATCH 10/10] release: 0.1.0-beta.4
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 11 +++++++++++
pyproject.toml | 2 +-
src/do_gradientai/_version.py | 2 +-
4 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index bf7fe4fa..ce692f94 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.1.0-beta.3"
+ ".": "0.1.0-beta.4"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2eeef144..2a4a3c4e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,16 @@
# Changelog
+## 0.1.0-beta.4 (2025-07-29)
+
+Full Changelog: [v0.1.0-beta.3...v0.1.0-beta.4](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-beta.3...v0.1.0-beta.4)
+
+### Features
+
+* **api:** update via SDK Studio ([3018b4c](https://github.com/digitalocean/gradientai-python/commit/3018b4cc758839eda46617170a24f181d9a0b70b))
+* **api:** update via SDK Studio ([4292abf](https://github.com/digitalocean/gradientai-python/commit/4292abf5ba2e89dedf7f7660f6e274e42a163ae0))
+* **api:** update via SDK Studio ([2252d77](https://github.com/digitalocean/gradientai-python/commit/2252d77e753a1407a1b851e01f4dcdbf1d4e0697))
+* **api:** update via SDK Studio ([7d7d879](https://github.com/digitalocean/gradientai-python/commit/7d7d879480a1d85ac8329cb98fa8da8afd8fee12))
+
## 0.1.0-beta.3 (2025-07-25)
Full Changelog: [v0.1.0-beta.2...v0.1.0-beta.3](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-beta.2...v0.1.0-beta.3)
diff --git a/pyproject.toml b/pyproject.toml
index 74463764..7bb32f6f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "do_gradientai"
-version = "0.1.0-beta.3"
+version = "0.1.0-beta.4"
description = "The official Python library for GradientAI"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py
index 2789c067..a0ffe639 100644
--- a/src/do_gradientai/_version.py
+++ b/src/do_gradientai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "do_gradientai"
-__version__ = "0.1.0-beta.3" # x-release-please-version
+__version__ = "0.1.0-beta.4" # x-release-please-version